From 1a3ea172defaf8713b1d84d4997bc1f341d535f0 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Sun, 28 Jan 2024 20:02:03 +0800 Subject: [PATCH 01/10] HDDS-10230 Preventing V3 Schema from Creating Container DB in the Wrong Location --- .../container/common/volume/HddsVolume.java | 77 ++++++++++++------- .../common/volume/MutableVolumeSet.java | 12 ++- .../KeyValueContainerLocationUtil.java | 10 +-- .../common/utils/TestHddsVolumeUtil.java | 35 +++++++++ 4 files changed, 100 insertions(+), 34 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 44bd4cf19a46..835f8e8f6b09 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -92,6 +92,7 @@ public class HddsVolume extends StorageVolume { private File dbParentDir; private File deletedContainerDir; private AtomicBoolean dbLoaded = new AtomicBoolean(false); + private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false); /** * Builder for HddsVolume. @@ -257,14 +258,23 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) VolumeCheckResult result = super.check(unused); DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class); + if (isDbLoadFailure()) { + LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + + "the volume might not have been loaded properly.", getStorageDir()); + return VolumeCheckResult.FAILED; + } if (result != VolumeCheckResult.HEALTHY || !df.getContainerSchemaV3Enabled() || !isDbLoaded()) { return result; } // Check that per-volume RocksDB is present. 
- File dbFile = new File(dbParentDir, CONTAINER_DB_NAME); - if (!dbFile.exists() || !dbFile.canRead()) { + File dbFile = dbParentDir == null ? null : new File(dbParentDir, CONTAINER_DB_NAME); + if (dbFile == null || !dbFile.exists() || !dbFile.canRead()) { + if (dbFile == null) { + LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + + "the volume might not have been loaded properly.", getStorageDir()); + } LOG.warn("Volume {} failed health check. Could not access RocksDB at " + "{}", getStorageDir(), dbFile); return VolumeCheckResult.FAILED; @@ -326,6 +336,10 @@ public boolean isDbLoaded() { return dbLoaded.get(); } + public boolean isDbLoadFailure() { + return dbLoadFailure.get(); + } + public void loadDbStore(boolean readOnly) throws IOException { // DN startup for the first time, not registered yet, // so the DbVolume is not formatted. @@ -343,35 +357,43 @@ public void loadDbStore(boolean readOnly) throws IOException { File clusterIdDir = new File(dbVolume == null ? 
getStorageDir() : dbVolume.getStorageDir(), getClusterID()); - if (!clusterIdDir.exists()) { - throw new IOException("Working dir " + clusterIdDir.getAbsolutePath() + - " not created for HddsVolume: " + getStorageDir().getAbsolutePath()); - } + try { + if (!clusterIdDir.exists()) { + throw new IOException("Working dir " + clusterIdDir.getAbsolutePath() + + " not created for HddsVolume: " + + getStorageDir().getAbsolutePath()); + } - File storageIdDir = new File(clusterIdDir, getStorageID()); - if (!storageIdDir.exists()) { - throw new IOException("Db parent dir " + storageIdDir.getAbsolutePath() + - " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); - } + File storageIdDir = new File(clusterIdDir, getStorageID()); + if (!storageIdDir.exists()) { + throw new IOException( + "Db parent dir " + storageIdDir.getAbsolutePath() + + " not found for HddsVolume: " + + getStorageDir().getAbsolutePath()); + } - File containerDBFile = new File(storageIdDir, CONTAINER_DB_NAME); - if (!containerDBFile.exists()) { - throw new IOException("Db dir " + storageIdDir.getAbsolutePath() + - " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); - } + File containerDBFile = new File(storageIdDir, CONTAINER_DB_NAME); + if (!containerDBFile.exists()) { + throw new IOException("Db dir " + storageIdDir.getAbsolutePath() + + " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); + } - String containerDBPath = containerDBFile.getAbsolutePath(); - try { - initPerDiskDBStore(containerDBPath, getConf(), readOnly); + String containerDBPath = containerDBFile.getAbsolutePath(); + try { + initPerDiskDBStore(containerDBPath, getConf(), readOnly); + } catch (IOException e) { + throw new IOException("Can't init db instance under path " + + containerDBPath + " for volume " + getStorageID(), e); + } + dbParentDir = storageIdDir; + dbLoaded.set(true); + dbLoadFailure.set(false); + LOG.info("SchemaV3 db is loaded at {} for volume {}", containerDBPath, + 
getStorageID()); } catch (IOException e) { - throw new IOException("Can't init db instance under path " - + containerDBPath + " for volume " + getStorageID(), e); + dbLoadFailure.set(true); + throw e; } - - dbParentDir = storageIdDir; - dbLoaded.set(true); - LOG.info("SchemaV3 db is loaded at {} for volume {}", containerDBPath, - getStorageID()); } /** @@ -417,9 +439,11 @@ public void createDbStore(MutableVolumeSet dbVolumeSet) throws IOException { try { HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf(), false); dbLoaded.set(true); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is created and loaded at {} for volume {}", containerDBPath, getStorageID()); } catch (IOException e) { + dbLoadFailure.set(true); String errMsg = "Can't create db instance under path " + containerDBPath + " for volume " + getStorageID(); LOG.error(errMsg, e); @@ -448,6 +472,7 @@ private void closeDbStore() { .getAbsolutePath(); DatanodeStoreCache.getInstance().removeDB(containerDBPath); dbLoaded.set(false); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is stopped at {} for volume {}", containerDBPath, getStorageID()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 3c0b6e618ee1..e195b127d499 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -442,12 +442,20 @@ public Map> getVolumeStateMap() { public boolean hasEnoughVolumes() { // Max number of bad volumes allowed, should have at least // 1 good volume + boolean hasEnoughVolumes; if (maxVolumeFailuresTolerated == StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - return getVolumesList().size() >= 1; + hasEnoughVolumes 
= getVolumesList().size() >= 1; } else { - return getFailedVolumesList().size() <= maxVolumeFailuresTolerated; + hasEnoughVolumes = getFailedVolumesList().size() <= maxVolumeFailuresTolerated; } + if (!hasEnoughVolumes) { + LOG.error("Not enough volumes in MutableVolumeSet. DatanodeUUID: {}, VolumeType: {}, " + + "MaxVolumeFailuresTolerated: {}, ActiveVolumes: {}, FailedVolumes: {}", + datanodeUuid, volumeType, maxVolumeFailuresTolerated, + getVolumesList().size(), getFailedVolumesList().size()); + } + return hasEnoughVolumes; } public StorageLocationReport[] getStorageReport() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index a45055821a41..e966a0bed862 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -124,15 +124,13 @@ private static String getContainerSubDirectory(long containerId) { */ public static File getContainerDBFile(KeyValueContainerData containerData) { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null"); return new File(containerData.getVolume().getDbParentDir(), OzoneConsts.CONTAINER_DB_NAME); } - return getContainerDBFile(containerData.getMetadataPath(), containerData); - } - - public static File getContainerDBFile(String baseDir, - KeyValueContainerData containerData) { - return new File(baseDir, containerData.getContainerID() + + Preconditions.checkNotNull(containerData.getMetadataPath(), "Metadata Directory cannot be null"); + return new File(containerData.getMetadataPath(), 
containerData.getContainerID() + OzoneConsts.DN_CONTAINER_DB); } + } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java index d05c127838f1..387997db736d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.volume.DbVolume; @@ -43,7 +44,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mockStatic; + +import org.mockito.MockedStatic; +import org.mockito.Mockito; + /** * Test for {@link HddsVolumeUtil}. @@ -95,6 +102,34 @@ public void teardown() { dbVolumeSet.shutdown(); } + @Test + public void testLoadHDDVolumeWithInitDBException() + throws Exception { + // Create db instances for all HDDsVolumes. 
+ for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + hddsVolume.format(clusterId); + hddsVolume.createWorkingDir(clusterId, null); + } + + try (MockedStatic mocked = mockStatic(HddsVolumeUtil.class, Mockito.CALLS_REAL_METHODS)) { + // Simulating the init DB Exception + mocked.when(() -> HddsVolumeUtil.initPerDiskDBStore(Mockito.anyString(), Mockito.any(), Mockito.anyBoolean())) + .thenThrow(new IOException("Mocked Exception")); + + reinitVolumes(); + for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + assertThrowsExactly(IOException.class, () -> hddsVolume.loadDbStore(true)); + // If the Volume init DB is abnormal, the Volume should be recognized as a failed Volume + assertEquals(VolumeCheckResult.FAILED, hddsVolume.check(false)); + assertTrue(hddsVolume.isDbLoadFailure()); + assertFalse(hddsVolume.isDbLoaded()); + } + } + + } + @Test public void testLoadAllHddsVolumeDbStoreWithoutDbVolumes() throws IOException { From f42c4f4e96cfa44a445a958ecbf619d2e620dc2a Mon Sep 17 00:00:00 2001 From: xichen01 Date: Sun, 28 Jan 2024 20:38:27 +0800 Subject: [PATCH 02/10] reduce unused change --- .../ozone/container/common/volume/HddsVolume.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 835f8e8f6b09..648a81bc4882 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -360,16 +360,13 @@ public void loadDbStore(boolean readOnly) throws IOException { try { if (!clusterIdDir.exists()) { throw new IOException("Working dir " + 
clusterIdDir.getAbsolutePath() + - " not created for HddsVolume: " + - getStorageDir().getAbsolutePath()); + " not created for HddsVolume: " + getStorageDir().getAbsolutePath()); } File storageIdDir = new File(clusterIdDir, getStorageID()); if (!storageIdDir.exists()) { - throw new IOException( - "Db parent dir " + storageIdDir.getAbsolutePath() + - " not found for HddsVolume: " + - getStorageDir().getAbsolutePath()); + throw new IOException("Db parent dir " + storageIdDir.getAbsolutePath() + + " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); } File containerDBFile = new File(storageIdDir, CONTAINER_DB_NAME); @@ -385,6 +382,7 @@ public void loadDbStore(boolean readOnly) throws IOException { throw new IOException("Can't init db instance under path " + containerDBPath + " for volume " + getStorageID(), e); } + dbParentDir = storageIdDir; dbLoaded.set(true); dbLoadFailure.set(false); From 148cd24c8c6f5f94d0e409f8570195d40644e50c Mon Sep 17 00:00:00 2001 From: xichen01 Date: Wed, 27 Mar 2024 17:07:14 +0800 Subject: [PATCH 03/10] Catch the Throwable Exception that occur when loading exception; Reduce some unnecessary changes --- .../ozone/container/common/volume/HddsVolume.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 648a81bc4882..2b87c33ff482 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -258,7 +258,7 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) VolumeCheckResult result = super.check(unused); DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class); - if 
(isDbLoadFailure()) { + if (!isDbLoaded() && isDbLoadFailure()) { LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + "the volume might not have been loaded properly.", getStorageDir()); return VolumeCheckResult.FAILED; @@ -269,12 +269,8 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) } // Check that per-volume RocksDB is present. - File dbFile = dbParentDir == null ? null : new File(dbParentDir, CONTAINER_DB_NAME); - if (dbFile == null || !dbFile.exists() || !dbFile.canRead()) { - if (dbFile == null) { - LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + - "the volume might not have been loaded properly.", getStorageDir()); - } + File dbFile = new File(dbParentDir, CONTAINER_DB_NAME); + if (!dbFile.exists() || !dbFile.canRead()) { LOG.warn("Volume {} failed health check. Could not access RocksDB at " + "{}", getStorageDir(), dbFile); return VolumeCheckResult.FAILED; @@ -388,7 +384,7 @@ public void loadDbStore(boolean readOnly) throws IOException { dbLoadFailure.set(false); LOG.info("SchemaV3 db is loaded at {} for volume {}", containerDBPath, getStorageID()); - } catch (IOException e) { + } catch (Throwable e) { dbLoadFailure.set(true); throw e; } From fade60c67ee8656b6003d73848cf3dbbd77b88d3 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Wed, 27 Mar 2024 18:07:22 +0800 Subject: [PATCH 04/10] Fix test --- .../ozone/container/common/volume/HddsVolume.java | 5 +++++ .../ozone/container/common/ContainerTestUtils.java | 9 ++++++++- .../container/common/impl/TestHddsDispatcher.java | 12 +++++++++++- .../common/volume/TestVolumeSetDiskChecks.java | 10 ++++++++++ .../upgrade/TestDatanodeUpgradeToScmHA.java | 3 +++ .../container/metrics/TestContainerMetrics.java | 6 ++++++ .../ozoneimpl/TestOzoneContainerWithTLS.java | 5 +++++ .../ozoneimpl/TestSecureOzoneContainer.java | 5 +++++ .../ozone/container/server/TestContainerServer.java | 6 ++++++ 
.../container/server/TestSecureContainerServer.java | 6 ++++++ 10 files changed, 65 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 2b87c33ff482..714a83bd7227 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -319,6 +319,11 @@ public File getDbParentDir() { return this.dbParentDir; } + @VisibleForTesting + public void setDbParentDir(File dbParentDir) { + this.dbParentDir = dbParentDir; + } + public File getDeletedContainerDir() { return this.deletedContainerDir; } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index e04d8f004936..13e20ea0015b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -67,6 +67,8 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.Collections; import java.util.Map; import java.util.Random; @@ -130,8 +132,13 @@ public static EndpointStateMachine createEndpoint(Configuration conf, public static OzoneContainer getOzoneContainer( DatanodeDetails datanodeDetails, OzoneConfiguration conf) throws IOException { + Path tempDir = Files.createTempDirectory(""); StateContext context = getMockContext(datanodeDetails, conf); - return new OzoneContainer(datanodeDetails, conf, 
context); + OzoneContainer container = new OzoneContainer(datanodeDetails, conf, context); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); + return container; } public static StateContext getMockContext(DatanodeDetails datanodeDetails, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index bd0356324030..12e532abed3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -51,6 +51,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -63,11 +64,13 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.time.Duration; import java.util.Collections; import java.util.HashMap; @@ -95,10 +98,13 @@ import static 
org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -/** +/** * Test-cases to verify the functionality of HddsDispatcher. */ public class TestHddsDispatcher { + @TempDir + private Path tempDir; + private static final Logger LOG = LoggerFactory.getLogger( TestHddsDispatcher.class); @@ -128,6 +134,8 @@ public void testContainerCloseActionWhenFull( (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -197,6 +205,8 @@ public void testContainerCloseActionWhenVolumeFull( 50, UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 27e1195a24b0..08c8e33b901f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import 
java.util.HashMap; @@ -46,6 +47,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -59,6 +61,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,6 +79,9 @@ */ @Timeout(30) public class TestVolumeSetDiskChecks { + @TempDir + private Path tempDir; + public static final Logger LOG = LoggerFactory.getLogger( TestVolumeSetDiskChecks.class); @@ -301,11 +307,15 @@ public void testVolumeFailure() throws IOException { dummyChecker); KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container); KeyValueContainer container1 = new KeyValueContainer(data1, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet1.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container1.create(volumeSet1, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container1); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 137214aa1cd6..59b88bcbea46 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; @@ -532,6 +533,8 @@ public void restartDatanode(int expectedMlv, boolean exactMatch) // Start new datanode with the same configuration. 
dsm = new DatanodeStateMachine(dd, conf); + StorageVolumeUtil.getHddsVolumesList(dsm.getContainer().getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion(); if (exactMatch) { assertEquals(expectedMlv, mlv); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index d4900bb48783..dd4ab3d60b4c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.metrics; import java.io.File; +import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.UUID; @@ -59,12 +60,15 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; /** * Test for metrics published by storage containers. 
*/ @Timeout(300) public class TestContainerMetrics { + @TempDir + private Path tempDir; @Test public void testContainerMetrics() throws Exception { @@ -105,6 +109,8 @@ public void testContainerMetrics() throws Exception { } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); dispatcher.setClusterId(UUID.randomUUID().toString()); server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index b3c8b732c16c..24d040ea4ff9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -300,6 +302,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() { StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf); container = new OzoneContainer( dn, conf, stateContext, caClient, keyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + 
StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); container.start(clusterID); } catch (Throwable e) { if (container != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 715b0678a173..1d611acbbafd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; @@ -137,6 +139,9 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); container = new OzoneContainer(dn, conf, ContainerTestUtils .getMockContext(dn, conf), caClient, secretKeyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); //Set scmId and manually start ozone container. 
container.start(UUID.randomUUID().toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 2e3cefb94fe7..56ef0f666a09 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -20,6 +20,8 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; @@ -57,6 +59,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -191,6 +194,9 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + Path tempDir = Files.createTempDirectory(""); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 3e2e092c2f5a..b9ec32089416 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -20,6 +20,8 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.EnumSet; @@ -65,6 +67,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -175,6 +178,9 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + Path tempDir = Files.createTempDirectory(""); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); From 9b344dd19bab491125167e8648976e706f689f53 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Wed, 27 Mar 2024 
23:00:54 +0800 Subject: [PATCH 05/10] Fix test --- .../hadoop/ozone/container/common/volume/HddsVolume.java | 2 +- .../hadoop/ozone/container/common/ContainerTestUtils.java | 5 ----- .../container/replication/TestGrpcReplicationService.java | 3 +++ .../ozone/container/ozoneimpl/TestOzoneContainer.java | 6 ++++++ 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index d8601ffb1d35..dc7f5bfb9297 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -258,7 +258,7 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) VolumeCheckResult result = super.check(unused); DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class); - if (isDbLoaded() && isDbLoadFailure()) { + if (isDbLoadFailure()) { LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + "the volume might not have been loaded properly.", getStorageDir()); return VolumeCheckResult.FAILED; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index e0b758b265d4..bfad9b112564 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -67,8 +67,6 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; 
import java.util.Collections; import java.util.Map; import java.util.Random; @@ -132,12 +130,9 @@ public static EndpointStateMachine createEndpoint(Configuration conf, public static OzoneContainer getOzoneContainer( DatanodeDetails datanodeDetails, OzoneConfiguration conf) throws IOException { - Path tempDir = Files.createTempDirectory(""); StateContext context = getMockContext(datanodeDetails, conf); OzoneContainer container = new OzoneContainer(datanodeDetails, conf, context); MutableVolumeSet volumeSet = container.getVolumeSet(); - StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) - .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); return container; } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index bad3e7ee81db..03901b99be3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -143,6 +144,8 @@ public void init(boolean isZeroCopy) throws Exception { ContainerLayoutVersion.FILE_PER_BLOCK, GB, UUID.randomUUID().toString(), datanode.getUuidString()); KeyValueContainer container = new 
KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), "test-replication"); containerSet.addContainer(container); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1b8bae0d03a8..1c5da04c0a3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -30,11 +30,13 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -55,6 +57,8 @@ */ @Timeout(300) public class TestOzoneContainer { + @TempDir + private Path tempDir; @Test public void testCreateOzoneContainer( @@ -75,6 +79,8 @@ public void testCreateOzoneContainer( DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); + StorageVolumeUtil.getHddsVolumesList(container.getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); //Set clusterId and manually start ozone container. 
container.start(UUID.randomUUID().toString()); From a736e71d8e18dbe1e2993c893209892373474274 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Thu, 28 Mar 2024 00:28:51 +0800 Subject: [PATCH 06/10] findbugs --- .../hadoop/ozone/container/common/ContainerTestUtils.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index bfad9b112564..c63f82025e09 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -131,9 +131,7 @@ public static OzoneContainer getOzoneContainer( DatanodeDetails datanodeDetails, OzoneConfiguration conf) throws IOException { StateContext context = getMockContext(datanodeDetails, conf); - OzoneContainer container = new OzoneContainer(datanodeDetails, conf, context); - MutableVolumeSet volumeSet = container.getVolumeSet(); - return container; + return new OzoneContainer(datanodeDetails, conf, context); } public static StateContext getMockContext(DatanodeDetails datanodeDetails, From 564c71078486aff1a87ffb7f12b2366014592fed Mon Sep 17 00:00:00 2001 From: xichen01 Date: Thu, 28 Mar 2024 16:53:58 +0800 Subject: [PATCH 07/10] Fix test --- .../container/common/volume/HddsVolume.java | 53 +++++++++---------- .../common/impl/TestHddsDispatcher.java | 2 +- 2 files changed, 25 insertions(+), 30 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index dc7f5bfb9297..d4cdaf2cfe41 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -358,41 +358,36 @@ public void loadDbStore(boolean readOnly) throws IOException { File clusterIdDir = new File(dbVolume == null ? getStorageDir() : dbVolume.getStorageDir(), getClusterID()); - try { - if (!clusterIdDir.exists()) { - throw new IOException("Working dir " + clusterIdDir.getAbsolutePath() + - " not created for HddsVolume: " + getStorageDir().getAbsolutePath()); - } - - File storageIdDir = new File(clusterIdDir, getStorageID()); - if (!storageIdDir.exists()) { - throw new IOException("Db parent dir " + storageIdDir.getAbsolutePath() + - " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); - } + if (!clusterIdDir.exists()) { + throw new IOException("Working dir " + clusterIdDir.getAbsolutePath() + + " not created for HddsVolume: " + getStorageDir().getAbsolutePath()); + } - File containerDBFile = new File(storageIdDir, CONTAINER_DB_NAME); - if (!containerDBFile.exists()) { - throw new IOException("Db dir " + storageIdDir.getAbsolutePath() + - " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); - } + File storageIdDir = new File(clusterIdDir, getStorageID()); + if (!storageIdDir.exists()) { + throw new IOException("Db parent dir " + storageIdDir.getAbsolutePath() + + " not found for HddsVolume: " + getStorageDir().getAbsolutePath()); + } - String containerDBPath = containerDBFile.getAbsolutePath(); - try { - initPerDiskDBStore(containerDBPath, getConf(), readOnly); - } catch (IOException e) { - throw new IOException("Can't init db instance under path " - + containerDBPath + " for volume " + getStorageID(), e); - } + File containerDBFile = new File(storageIdDir, CONTAINER_DB_NAME); + if (!containerDBFile.exists()) { + throw new IOException("Db dir " + storageIdDir.getAbsolutePath() + + " not found for 
HddsVolume: " + getStorageDir().getAbsolutePath()); + } - dbParentDir = storageIdDir; - dbLoaded.set(true); - dbLoadFailure.set(false); - LOG.info("SchemaV3 db is loaded at {} for volume {}", containerDBPath, - getStorageID()); + String containerDBPath = containerDBFile.getAbsolutePath(); + try { + initPerDiskDBStore(containerDBPath, getConf(), readOnly); } catch (Throwable e) { dbLoadFailure.set(true); - throw e; + throw new IOException("Can't init db instance under path " + + containerDBPath + " for volume " + getStorageID(), e); } + + dbParentDir = storageIdDir; + dbLoaded.set(true); + LOG.info("SchemaV3 db is loaded at {} for volume {}", containerDBPath, + getStorageID()); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 9ab7d5759a1e..e5f6dc7edefd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -101,7 +101,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -/**container.create +/** * Test-cases to verify the functionality of HddsDispatcher. 
*/ public class TestHddsDispatcher { From 30c978bc64ace3ada94759cc32f39f7d4e6dd816 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Thu, 28 Mar 2024 20:04:02 +0800 Subject: [PATCH 08/10] fix test --- .../apache/hadoop/ozone/container/common/TestEndPoint.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index c74e274d3d72..6a4cebe9c7a9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -184,9 +184,9 @@ public void testDeletedContainersClearedOnStartup() throws Exception { ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); + OzoneContainer ozoneContainer = createVolume(ozoneConf); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { - OzoneContainer ozoneContainer = createVolume(ozoneConf); HddsVolume hddsVolume = (HddsVolume) ozoneContainer.getVolumeSet() .getVolumesList().get(0); KeyValueContainer kvContainer = addContainer(ozoneConf, hddsVolume); @@ -212,6 +212,8 @@ public void testDeletedContainersClearedOnStartup() throws Exception { hddsVolume.getDeletedContainerDir().listFiles(); assertNotNull(leftoverContainers); assertEquals(0, leftoverContainers.length); + } finally { + ozoneContainer.stop(); } } From 32cd82635634ccab7b4324e964fec177f707f3c8 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Wed, 3 Apr 2024 23:08:54 +0800 Subject: [PATCH 09/10] Replace uncleaned temporary directory --- .../ozone/container/server/TestContainerServer.java | 7 ++++--- .../ozone/container/server/TestSecureContainerServer.java | 8 ++++---- 2 files changed, 8 insertions(+), 7 
deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index eaeb13144440..630c4d314959 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -20,7 +20,6 @@ import java.io.File; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; @@ -72,6 +71,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; @@ -87,6 +87,8 @@ public class TestContainerServer { .getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; + @TempDir + private Path tempDir; @BeforeAll public static void setup() { @@ -185,7 +187,7 @@ static void runTestClientServer( } } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); @@ -195,7 +197,6 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); - Path tempDir = Files.createTempDirectory(""); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) 
.forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index f6789a29af85..02b533260ac3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -169,7 +169,7 @@ public void testClientServer() throws Exception { hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, @@ -205,7 +205,7 @@ public void testClientServerRatisGrpc() throws Exception { runTestClientServerRatis(GRPC, 3); } - static XceiverServerRatis newXceiverServerRatis( + XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); @@ -222,12 +222,12 @@ static XceiverServerRatis newXceiverServerRatis( caClient, null); } - private static void runTestClientServerRatis(RpcType rpc, int numNodes) + private void runTestClientServerRatis(RpcType rpc, int numNodes) throws Exception { runTestClientServer(numNodes, (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), XceiverClientRatis::newXceiverClientRatis, - TestSecureContainerServer::newXceiverServerRatis, + this::newXceiverServerRatis, (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, 
dn, p), (p) -> { }); } From 2205d3930d69bd9b90be0a70bd9783bdefd72af9 Mon Sep 17 00:00:00 2001 From: xichen01 Date: Mon, 8 Apr 2024 13:12:27 +0800 Subject: [PATCH 10/10] Replace uncleaned temporary directory --- .../ozone/container/server/TestSecureContainerServer.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 02b533260ac3..8044685bb747 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -20,7 +20,6 @@ import java.io.File; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; @@ -106,6 +105,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.assertj.core.api.Assertions.assertThat; @@ -118,6 +118,8 @@ * Test Container servers when security is enabled. 
*/ public class TestSecureContainerServer { + @TempDir + private Path tempDir; private static final String TEST_DIR = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -178,7 +180,6 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); - Path tempDir = Files.createTempDirectory(""); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf);