diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index d8ba919cefb5..d4cdaf2cfe41 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -92,6 +92,7 @@ public class HddsVolume extends StorageVolume {
   private File dbParentDir;
   private File deletedContainerDir;
   private AtomicBoolean dbLoaded = new AtomicBoolean(false);
+  private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false);

   /**
    * Builder for HddsVolume.
    */
@@ -257,6 +258,11 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused)
     VolumeCheckResult result = super.check(unused);

     DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class);
+    if (isDbLoadFailure()) {
+      LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " +
+          "the volume might not have been loaded properly.", getStorageDir());
+      return VolumeCheckResult.FAILED;
+    }
     if (result != VolumeCheckResult.HEALTHY ||
         !df.getContainerSchemaV3Enabled() || !isDbLoaded()) {
       return result;
@@ -313,6 +319,11 @@ public File getDbParentDir() {
     return this.dbParentDir;
   }

+  @VisibleForTesting
+  public void setDbParentDir(File dbParentDir) {
+    this.dbParentDir = dbParentDir;
+  }
+
   public File getDeletedContainerDir() {
     return this.deletedContainerDir;
   }
@@ -326,6 +337,10 @@ public boolean isDbLoaded() {
     return dbLoaded.get();
   }

+  public boolean isDbLoadFailure() {
+    return dbLoadFailure.get();
+  }
+
   public void loadDbStore(boolean readOnly) throws IOException {
     // DN startup for the first time, not registered yet,
     // so the DbVolume is not formatted.
@@ -363,7 +378,8 @@ public void loadDbStore(boolean readOnly) throws IOException {
     String containerDBPath = containerDBFile.getAbsolutePath();
     try {
       initPerDiskDBStore(containerDBPath, getConf(), readOnly);
-    } catch (IOException e) {
+    } catch (Throwable e) {
+      dbLoadFailure.set(true);
       throw new IOException("Can't init db instance under path "
           + containerDBPath + " for volume " + getStorageID(), e);
     }
@@ -417,9 +433,11 @@ public void createDbStore(MutableVolumeSet dbVolumeSet) throws IOException {
     try {
       HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf(), false);
       dbLoaded.set(true);
+      dbLoadFailure.set(false);
       LOG.info("SchemaV3 db is created and loaded at {} for volume {}",
           containerDBPath, getStorageID());
     } catch (IOException e) {
+      dbLoadFailure.set(true);
       String errMsg = "Can't create db instance under path "
           + containerDBPath + " for volume " + getStorageID();
       LOG.error(errMsg, e);
@@ -448,6 +466,7 @@ private void closeDbStore() {
         .getAbsolutePath();
     DatanodeStoreCache.getInstance().removeDB(containerDBPath);
     dbLoaded.set(false);
+    dbLoadFailure.set(false);
     LOG.info("SchemaV3 db is stopped at {} for volume {}", containerDBPath,
         getStorageID());
   }
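The dbLoadFailure flag added above latches any RocksDB initialization error and surfaces it on the next periodic volume check; note that loadDbStore() now catches Throwable rather than IOException, presumably because RocksDB problems can surface as unchecked exceptions from the native layer. A minimal sketch of the latch pattern (standalone names, not the real HddsVolume internals):

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch only: a latched DB-load error fails every later health check
// until the store is successfully (re)created or cleanly closed.
class DbFailureLatch {
  private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false);

  void loadDb(Runnable initDbStore) {
    try {
      initDbStore.run();          // stands in for initPerDiskDBStore()
    } catch (Throwable t) {
      dbLoadFailure.set(true);    // latch the failure for the checker
      throw new IllegalStateException("Can't init db instance", t);
    }
  }

  boolean checkFailed() {
    // Mirrors the new HddsVolume.check() short-circuit: a latched load
    // failure wins over every other health criterion.
    return dbLoadFailure.get();
  }
}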
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 3c0b6e618ee1..e195b127d499 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -442,12 +442,20 @@ public Map<String, List<StorageVolume>> getVolumeStateMap() {
   public boolean hasEnoughVolumes() {
     // Max number of bad volumes allowed, should have at least
     // 1 good volume
+    boolean hasEnoughVolumes;
     if (maxVolumeFailuresTolerated ==
         StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
-      return getVolumesList().size() >= 1;
+      hasEnoughVolumes = getVolumesList().size() >= 1;
     } else {
-      return getFailedVolumesList().size() <= maxVolumeFailuresTolerated;
+      hasEnoughVolumes = getFailedVolumesList().size() <= maxVolumeFailuresTolerated;
     }
+    if (!hasEnoughVolumes) {
+      LOG.error("Not enough volumes in MutableVolumeSet. DatanodeUUID: {}, VolumeType: {}, " +
+          "MaxVolumeFailuresTolerated: {}, ActiveVolumes: {}, FailedVolumes: {}",
+          datanodeUuid, volumeType, maxVolumeFailuresTolerated,
+          getVolumesList().size(), getFailedVolumesList().size());
+    }
+    return hasEnoughVolumes;
   }

   public StorageLocationReport[] getStorageReport() {
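hasEnoughVolumes() keeps its original decision logic but now logs the datanode UUID, volume type, tolerance, and volume counts before reporting a shortage, so "not enough volumes" failures become diagnosable from the datanode log alone. Restated as a standalone sketch (hypothetical names; assumes the sentinel constant means "no configured failure limit", i.e. MAX_VOLUME_FAILURE_TOLERATED_LIMIT):

// Sketch of the tolerance check inside hasEnoughVolumes().
static boolean hasEnoughVolumes(int activeVolumes, int failedVolumes,
    int maxFailuresTolerated, int noLimitSentinel) {
  if (maxFailuresTolerated == noLimitSentinel) {
    return activeVolumes >= 1;   // unlimited failures, but need one survivor
  }
  return failedVolumes <= maxFailuresTolerated;
}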
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
index a45055821a41..e966a0bed862 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java
@@ -124,15 +124,13 @@ private static String getContainerSubDirectory(long containerId) {
    */
  public static File getContainerDBFile(KeyValueContainerData containerData) {
    if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) {
+      Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null");
      return new File(containerData.getVolume().getDbParentDir(),
          OzoneConsts.CONTAINER_DB_NAME);
    }
-    return getContainerDBFile(containerData.getMetadataPath(), containerData);
-  }
-
-  public static File getContainerDBFile(String baseDir,
-      KeyValueContainerData containerData) {
-    return new File(baseDir, containerData.getContainerID() +
+    Preconditions.checkNotNull(containerData.getMetadataPath(), "Metadata Directory cannot be null");
+    return new File(containerData.getMetadataPath(), containerData.getContainerID() +
        OzoneConsts.DN_CONTAINER_DB);
  }
+
 }
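Folding the two getContainerDBFile() overloads into one and guarding both branches with Preconditions.checkNotNull converts a latent NullPointerException (a File silently built from a null parent) into an immediate failure with a descriptive message. A hedged sketch of the fail-fast shape, with hypothetical names:

import com.google.common.base.Preconditions;
import java.io.File;

// Sketch only: refuse to construct a DB path from a null parent directory.
static File containerDbFile(File dbParentDir, String dbName) {
  Preconditions.checkNotNull(dbParentDir, "Base Directory cannot be null");
  return new File(dbParentDir, dbName);   // parent proven non-null here
}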
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index eaf901c67a83..e5f6dc7edefd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
@@ -73,6 +74,7 @@
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Path;
 import java.time.Duration;
 import java.util.Collections;
 import java.util.HashMap;
@@ -103,6 +105,9 @@
  * Test-cases to verify the functionality of HddsDispatcher.
  */
 public class TestHddsDispatcher {
+  @TempDir
+  private Path tempDir;
+
   private static final Logger LOG = LoggerFactory.getLogger(
       TestHddsDispatcher.class);
   @TempDir
@@ -133,6 +138,8 @@ public void testContainerCloseActionWhenFull(
         (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
         dd.getUuidString());
     Container container = new KeyValueContainer(containerData, conf);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         scmId.toString());
     containerSet.addContainer(container);
@@ -267,6 +274,8 @@ public void testContainerCloseActionWhenVolumeFull(
         50, UUID.randomUUID().toString(), dd.getUuidString());
     Container container = new KeyValueContainer(containerData, conf);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         scmId.toString());
     containerSet.addContainer(container);
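Because of that null check, tests that build a KeyValueContainer without running the full volume-format path must first point each HddsVolume's DB parent at a scratch directory, which is what the two forEach lines above do. The same setup recurs in the remaining test files below; a hypothetical shared helper (not part of this patch) would capture it once:

import java.nio.file.Path;
import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;

// Sketch of a possible shared test utility.
final class VolumeTestUtils {
  private VolumeTestUtils() { }

  static void pointDbParentsAt(MutableVolumeSet volumeSet, Path tempDir) {
    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
  }
}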
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
index d05c127838f1..387997db736d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
@@ -19,6 +19,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.volume.DbVolume;
@@ -43,7 +44,13 @@
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mockStatic;
+
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+

 /**
  * Test for {@link HddsVolumeUtil}.
@@ -95,6 +102,34 @@ public void teardown() {
     dbVolumeSet.shutdown();
   }

+  @Test
+  public void testLoadHDDVolumeWithInitDBException()
+      throws Exception {
+    // Create db instances for all HDDsVolumes.
+    for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList(
+        hddsVolumeSet.getVolumesList())) {
+      hddsVolume.format(clusterId);
+      hddsVolume.createWorkingDir(clusterId, null);
+    }
+
+    try (MockedStatic<HddsVolumeUtil> mocked = mockStatic(HddsVolumeUtil.class, Mockito.CALLS_REAL_METHODS)) {
+      // Simulating the init DB Exception
+      mocked.when(() -> HddsVolumeUtil.initPerDiskDBStore(Mockito.anyString(), Mockito.any(), Mockito.anyBoolean()))
+          .thenThrow(new IOException("Mocked Exception"));
+
+      reinitVolumes();
+      for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList(
+          hddsVolumeSet.getVolumesList())) {
+        assertThrowsExactly(IOException.class, () -> hddsVolume.loadDbStore(true));
+        // If the Volume init DB is abnormal, the Volume should be recognized as a failed Volume
+        assertEquals(VolumeCheckResult.FAILED, hddsVolume.check(false));
+        assertTrue(hddsVolume.isDbLoadFailure());
+        assertFalse(hddsVolume.isDbLoaded());
+      }
+    }
+
+  }
+
   @Test
   public void testLoadAllHddsVolumeDbStoreWithoutDbVolumes()
       throws IOException {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index eb1f7979f8b9..55df5f43b6b8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -21,6 +21,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -45,6 +46,7 @@
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -76,6 +78,9 @@
  */
 @Timeout(30)
 public class TestVolumeSetDiskChecks {
+  @TempDir
+  private Path tempDir;
+
   public static final Logger LOG = LoggerFactory.getLogger(
       TestVolumeSetDiskChecks.class);
   @TempDir
@@ -302,11 +307,15 @@ public void testVolumeFailure() throws IOException {
         dummyChecker);

     KeyValueContainer container = new KeyValueContainer(data, conf);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         UUID.randomUUID().toString());
     conSet.addContainer(container);

     KeyValueContainer container1 = new KeyValueContainer(data1, conf);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet1.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     container1.create(volumeSet1, new RoundRobinVolumeChoosingPolicy(),
         UUID.randomUUID().toString());
     conSet.addContainer(container1);
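The new test stubs a static method: mockStatic() with CALLS_REAL_METHODS keeps every other HddsVolumeUtil method real while only initPerDiskDBStore() throws, and the stub is undone automatically when the try-with-resources scope closes. The pattern in isolation (assumes Mockito's inline mock maker is on the test classpath; the wrapper method is hypothetical):

import java.io.IOException;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import static org.mockito.Mockito.mockStatic;

// Sketch: run 'body' while HddsVolumeUtil.initPerDiskDBStore always fails.
static void withFailingDbInit(Runnable body) {
  try (MockedStatic<HddsVolumeUtil> mocked =
      mockStatic(HddsVolumeUtil.class, Mockito.CALLS_REAL_METHODS)) {
    mocked.when(() -> HddsVolumeUtil.initPerDiskDBStore(
            Mockito.anyString(), Mockito.any(), Mockito.anyBoolean()))
        .thenThrow(new IOException("Mocked Exception"));
    body.run();   // the stub is active only inside this block
  }
}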
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java
index bad3e7ee81db..03901b99be3b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
@@ -143,6 +144,8 @@ public void init(boolean isZeroCopy) throws Exception {
         ContainerLayoutVersion.FILE_PER_BLOCK, GB,
         UUID.randomUUID().toString(), datanode.getUuidString());
     KeyValueContainer container = new KeyValueContainer(data, conf);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         "test-replication");
     containerSet.addContainer(container);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
index 137214aa1cd6..59b88bcbea46 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.replication.ContainerImporter;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource;
@@ -532,6 +533,8 @@ public void restartDatanode(int expectedMlv, boolean exactMatch)
     // Start new datanode with the same configuration.
     dsm = new DatanodeStateMachine(dd, conf);
+    StorageVolumeUtil.getHddsVolumesList(dsm.getContainer().getVolumeSet().getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile()));
     int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion();
     if (exactMatch) {
       assertEquals(expectedMlv, mlv);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index c74e274d3d72..6a4cebe9c7a9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -184,9 +184,9 @@ public void testDeletedContainersClearedOnStartup() throws Exception {
     ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
         true);
     ozoneConf.setFromObject(new ReplicationConfig().setPort(0));
+    OzoneContainer ozoneContainer = createVolume(ozoneConf);
     try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
         serverAddress, 1000)) {
-      OzoneContainer ozoneContainer = createVolume(ozoneConf);
       HddsVolume hddsVolume = (HddsVolume) ozoneContainer.getVolumeSet()
           .getVolumesList().get(0);
       KeyValueContainer kvContainer = addContainer(ozoneConf, hddsVolume);
@@ -212,6 +212,8 @@ public void testDeletedContainersClearedOnStartup() throws Exception {
           hddsVolume.getDeletedContainerDir().listFiles();
       assertNotNull(leftoverContainers);
       assertEquals(0, leftoverContainers.length);
+    } finally {
+      ozoneContainer.stop();
     }
   }
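The TestEndPoint change is a lifecycle fix rather than a behavior change: the OzoneContainer is managed with start()/stop() instead of close(), so it cannot ride on the try-with-resources directly; creating it before the try and stopping it in finally guarantees its volumes and RocksDB handles are released even when an assertion fails mid-test. The resulting shape, with the test's own helpers assumed:

OzoneContainer ozoneContainer = createVolume(ozoneConf);   // test helper
try (EndpointStateMachine rpcEndPoint =
    createEndpoint(ozoneConf, serverAddress, 1000)) {
  // ... register the datanode and assert on deleted-container cleanup ...
} finally {
  ozoneContainer.stop();   // always release volumes and DB handles
}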
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 51943a2e8d23..a4a5701f5491 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.metrics;

 import java.io.File;
+import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -59,12 +60,15 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;

 /**
  * Test for metrics published by storage containers.
  */
 @Timeout(300)
 public class TestContainerMetrics {
+  @TempDir
+  private Path tempDir;

   @Test
   public void testContainerMetrics() throws Exception {
@@ -105,6 +109,8 @@ public void testContainerMetrics() throws Exception {
       }
       HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet,
           volumeSet, handlers, context, metrics, null);
+      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+          .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
       dispatcher.setClusterId(UUID.randomUUID().toString());

       server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 1b8bae0d03a8..1c5da04c0a3e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -30,11 +30,13 @@
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.api.io.TempDir;

 import java.io.File;
+import java.nio.file.Path;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -55,6 +57,8 @@
  */
 @Timeout(300)
 public class TestOzoneContainer {
+  @TempDir
+  private Path tempDir;

   @Test
   public void testCreateOzoneContainer(
@@ -75,6 +79,8 @@ public void testCreateOzoneContainer(
       DatanodeDetails datanodeDetails = randomDatanodeDetails();
       container = ContainerTestUtils
           .getOzoneContainer(datanodeDetails, conf);
+      StorageVolumeUtil.getHddsVolumesList(container.getVolumeSet().getVolumesList())
+          .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
       //Set clusterId and manually start ozone container.
       container.start(UUID.randomUUID().toString());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 73910ef00ff1..b05c547b625d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -38,6 +38,8 @@
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -303,6 +305,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() {
       StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf);
       container = new OzoneContainer(
           dn, conf, stateContext, caClient, keyClient);
+      MutableVolumeSet volumeSet = container.getVolumeSet();
+      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+          .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile()));
       container.start(clusterID);
     } catch (Throwable e) {
       if (container != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index 4f24f8e6c320..5585696dfc31 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -38,6 +38,8 @@
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.SecretKeyTestClient;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.ozone.test.GenericTestUtils;
@@ -137,6 +139,9 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken,
       DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
       container = new OzoneContainer(dn, conf, ContainerTestUtils
           .getMockContext(dn, conf), caClient, secretKeyClient);
+      MutableVolumeSet volumeSet = container.getVolumeSet();
+      StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+          .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile()));
       //Set scmId and manually start ozone container.
       container.start(UUID.randomUUID().toString());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index c05f55bd4a74..630c4d314959 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.List;
@@ -57,6 +58,7 @@
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
@@ -69,6 +71,7 @@
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;

 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
@@ -84,6 +87,8 @@ public class TestContainerServer {
       .getAbsolutePath() + File.separator;
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
   private static CertificateClient caClient;
+  @TempDir
+  private Path tempDir;

   @BeforeAll
   public static void setup() {
@@ -182,7 +187,7 @@ static void runTestClientServer(
     }
   }

-  private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
+  private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       OzoneConfiguration conf) throws IOException {
     ContainerSet containerSet = new ContainerSet(1000);
@@ -192,6 +197,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
     conf.set(OZONE_METADATA_DIRS, TEST_DIR);
     VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     StateContext context = ContainerTestUtils.getMockContext(dd, conf);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
     Map<ContainerType, Handler> handlers = Maps.newHashMap();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index e0522ac6e91d..8044685bb747 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.EnumSet;
@@ -65,6 +66,7 @@
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
@@ -103,6 +105,7 @@
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;

 import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -115,6 +118,8 @@
  * Test Container servers when security is enabled.
  */
 public class TestSecureContainerServer {
+  @TempDir
+  private Path tempDir;
   private static final String TEST_DIR
       = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator;
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
@@ -166,7 +171,7 @@ public void testClientServer() throws Exception {
         hddsDispatcher, caClient), (dn, p) -> {
     }, (p) -> {
     });
   }

-  private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
+  private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
       OzoneConfiguration conf) throws IOException {
     ContainerSet containerSet = new ContainerSet(1000);
     conf.set(HDDS_DATANODE_DIR_KEY,
@@ -175,6 +180,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
     conf.set(OZONE_METADATA_DIRS, TEST_DIR);
     VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null,
         StorageVolume.VolumeType.DATA_VOLUME, null);
+    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
+        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
     StateContext context = ContainerTestUtils.getMockContext(dd, conf);
     ContainerMetrics metrics = ContainerMetrics.create(conf);
     Map<ContainerType, Handler> handlers = Maps.newHashMap();
@@ -199,7 +206,7 @@ public void testClientServerRatisGrpc() throws Exception {
     runTestClientServerRatis(GRPC, 3);
   }

-  static XceiverServerRatis newXceiverServerRatis(
+  XceiverServerRatis newXceiverServerRatis(
       DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
     conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
         dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
@@ -216,12 +223,12 @@ static XceiverServerRatis newXceiverServerRatis(
         caClient, null);
   }

-  private static void runTestClientServerRatis(RpcType rpc, int numNodes)
+  private void runTestClientServerRatis(RpcType rpc, int numNodes)
       throws Exception {
     runTestClientServer(numNodes,
         (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf),
         XceiverClientRatis::newXceiverClientRatis,
-        TestSecureContainerServer::newXceiverServerRatis,
+        this::newXceiverServerRatis,
         (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p), (p) -> {
     });
   }