From 8f3a844a4b737506a1451b066f4fc657c6e2751c Mon Sep 17 00:00:00 2001 From: peterxcli Date: Sat, 15 Mar 2025 16:34:22 +0800 Subject: [PATCH 01/18] HDDS-12547. Container creation and import use the same VolumeChoosingPolicy --- .../volume/CapacityVolumeChoosingPolicy.java | 6 ++--- .../volume/VolumeChoosingPolicyFactory.java | 25 +++++++++++++++++++ .../container/keyvalue/KeyValueHandler.java | 2 +- .../replication/ContainerImporter.java | 2 +- 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 10794c8fa72a..0b6e86585a1b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.List; -import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; import java.util.stream.Collectors; import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; @@ -44,9 +44,6 @@ public class CapacityVolumeChoosingPolicy implements VolumeChoosingPolicy { public static final Logger LOG = LoggerFactory.getLogger( CapacityVolumeChoosingPolicy.class); - // Stores the index of the next volume to be returned. - private final Random random = new Random(); - @Override public HddsVolume chooseVolume(List volumes, long maxContainerSize) throws IOException { @@ -83,6 +80,7 @@ public HddsVolume chooseVolume(List volumes, // 4. vol2 + vol2: 25%, result is vol2 // So we have a total of 75% chances to choose vol1, which meets our // expectation. + ThreadLocalRandom random = ThreadLocalRandom.current(); int firstIndex = random.nextInt(count); int secondIndex = random.nextInt(count); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java index bb5f967ba80d..3046cd5e5607 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java @@ -32,6 +32,8 @@ public final class VolumeChoosingPolicyFactory { private static final Class DEFAULT_VOLUME_CHOOSING_POLICY = CapacityVolumeChoosingPolicy.class; + private static volatile VolumeChoosingPolicy policyInstance; + private VolumeChoosingPolicyFactory() { } @@ -41,4 +43,27 @@ public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf) DEFAULT_VOLUME_CHOOSING_POLICY, VolumeChoosingPolicy.class) .newInstance(); } + + /** + * Get a shared singleton instance of VolumeChoosingPolicy. + * This method ensures that only one instance of VolumeChoosingPolicy + * is created and shared across the application. 
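+ * <p>
+ * Illustrative usage, assuming the caller supplies a {@code ConfigurationSource},
+ * the list of data volumes and the max container size:
+ * <pre>
+ *   VolumeChoosingPolicy policy = VolumeChoosingPolicyFactory.getSharedPolicy(conf);
+ *   HddsVolume target = policy.chooseVolume(volumes, maxContainerSize);
+ * </pre>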
+ * + * @param conf Configuration + * @return The shared VolumeChoosingPolicy instance + */ + public static VolumeChoosingPolicy getSharedPolicy(ConfigurationSource conf) { + if (policyInstance == null) { + synchronized (VolumeChoosingPolicyFactory.class) { + if (policyInstance == null) { + try { + policyInstance = getPolicy(conf); + } catch (Exception e) { + throw new RuntimeException("Failed to create VolumeChoosingPolicy", e); + } + } + } + } + return policyInstance; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 572d30d8db8a..2c8db66136ee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -175,7 +175,7 @@ public KeyValueHandler(ConfigurationSource config, chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, volSet); try { - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getSharedPolicy(conf); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 46bbb6662015..21670e2df47f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -75,7 +75,7 @@ public ContainerImporter(@Nonnull ConfigurationSource conf, this.controller = controller; this.volumeSet = volumeSet; try { - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getSharedPolicy(conf); } catch (Exception e) { throw new RuntimeException(e); } From 8d1e90a96b1ffb703a6af8c5d97f303f2e42ed28 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Sat, 15 Mar 2025 17:52:40 +0800 Subject: [PATCH 02/18] Don't handle exception in VolumeChoosingPolicyFactory#getSharedPolicy --- .../volume/VolumeChoosingPolicyFactory.java | 9 +++---- .../keyvalue/TestKeyValueHandler.java | 26 +++++++++++-------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java index 3046cd5e5607..207d789fb22e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java @@ -52,15 +52,12 @@ public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf) * @param conf Configuration * @return The shared VolumeChoosingPolicy instance */ - public static VolumeChoosingPolicy getSharedPolicy(ConfigurationSource conf) { + public static VolumeChoosingPolicy getSharedPolicy(ConfigurationSource conf) + throws InstantiationException, 
IllegalAccessException { if (policyInstance == null) { synchronized (VolumeChoosingPolicyFactory.class) { if (policyInstance == null) { - try { - policyInstance = getPolicy(conf); - } catch (Exception e) { - throw new RuntimeException("Failed to create VolumeChoosingPolicy", e); - } + policyInstance = getPolicy(conf); } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index f55a515a5b72..b16d1e135cb5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -282,6 +282,21 @@ public void testVolumeSetInKeyValueHandler() throws Exception { DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); StateContext context = ContainerTestUtils.getMockContext( datanodeDetails, conf); + + //Set a class which is not of sub class of VolumeChoosingPolicy + conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, + "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); + RuntimeException exception = assertThrows(RuntimeException.class, + () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, + metrics, c -> { + })); + + assertThat(exception).hasMessageEndingWith( + "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher " + + "not org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy"); + + conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, + "org.apache.hadoop.ozone.container.common.volume.CapacityVolumeChoosingPolicy"); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, metrics, c -> { @@ -297,17 +312,6 @@ public void testVolumeSetInKeyValueHandler() throws Exception { metrics, c -> { }); assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK, conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK)); - - //Set a class which is not of sub class of VolumeChoosingPolicy - conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); - RuntimeException exception = assertThrows(RuntimeException.class, - () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, - metrics, c -> { })); - - assertThat(exception).hasMessageEndingWith( - "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher " + - "not org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy"); } finally { volumeSet.shutdown(); FileUtil.fullyDelete(datanodeDir); From ecb4c38754e8912edd7a54eaaa65618cdcdcb157 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Tue, 18 Mar 2025 18:04:52 +0800 Subject: [PATCH 03/18] Move to VolumeChoosingPolicy DatanodeStateMachine and pass it to KeyValueHandler and ContainerImporter --- .../container/common/interfaces/Handler.java | 6 ++- .../statemachine/DatanodeStateMachine.java | 17 ++++++++- .../volume/VolumeChoosingPolicyFactory.java | 22 ----------- .../container/keyvalue/KeyValueHandler.java | 17 +++------ .../container/ozoneimpl/OzoneContainer.java | 13 ++++--- .../replication/ContainerImporter.java | 10 ++--- .../common/TestDatanodeStateMachine.java | 4 ++ .../keyvalue/TestKeyValueHandler.java | 38 +++++-------------- 8 
files changed, 48 insertions(+), 79 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index fabc92ff8bfb..63af6ce70f40 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -67,15 +67,17 @@ protected Handler(ConfigurationSource config, String datanodeId, this.icrSender = icrSender; } + @SuppressWarnings("checkstyle:ParameterNumber") public static Handler getHandlerForContainerType( final ContainerType containerType, final ConfigurationSource config, final String datanodeId, final ContainerSet contSet, - final VolumeSet volumeSet, final ContainerMetrics metrics, + final VolumeSet volumeSet, final VolumeChoosingPolicy volumeChoosingPolicy, + final ContainerMetrics metrics, IncrementalReportSender icrSender) { switch (containerType) { case KeyValueContainer: return new KeyValueHandler(config, - datanodeId, contSet, volumeSet, metrics, + datanodeId, contSet, volumeSet, volumeChoosingPolicy, metrics, icrSender); default: throw new IllegalArgumentException("Handler for ContainerType: " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index e4d2050e4274..ee53ff3290ff 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -49,6 +49,7 @@ import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.HddsDatanodeStopService; import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.report.ReportManager; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ClosePipelineCommandHandler; @@ -61,6 +62,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.RefreshVolumeUsageCommandHandler; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.SetNodeOperationalStateCommandHandler; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -98,6 +100,7 @@ public class DatanodeStateMachine implements Closeable { private final SCMConnectionManager connectionManager; private final ECReconstructionCoordinator ecReconstructionCoordinator; private StateContext context; + private VolumeChoosingPolicy volumeChoosingPolicy; private final OzoneContainer container; private final DatanodeDetails datanodeDetails; 
private final CommandDispatcher commandDispatcher; @@ -174,13 +177,18 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, connectionManager = new SCMConnectionManager(conf); context = new StateContext(this.conf, DatanodeStates.getInitState(), this, threadNamePrefix); + try { + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); + } catch (Exception e) { + throw new RuntimeException(e); + } // OzoneContainer instance is used in a non-thread safe way by the context // past to its constructor, so we much synchronize its access. See // HDDS-3116 for more details. constructionLock.writeLock().lock(); try { container = new OzoneContainer(hddsDatanodeService, this.datanodeDetails, - conf, context, certClient, secretKeyClient); + conf, context, certClient, secretKeyClient, volumeChoosingPolicy); } finally { constructionLock.writeLock().unlock(); } @@ -189,7 +197,8 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, ContainerImporter importer = new ContainerImporter(conf, container.getContainerSet(), container.getController(), - container.getVolumeSet()); + container.getVolumeSet(), + volumeChoosingPolicy); ContainerReplicator pullReplicator = new DownloadAndImportReplicator( conf, container.getContainerSet(), importer, @@ -745,4 +754,8 @@ public DatanodeQueueMetrics getQueueMetrics() { public ReconfigurationHandler getReconfigurationHandler() { return reconfigurationHandler; } + + public VolumeChoosingPolicy getVolumeChoosingPolicy() { + return volumeChoosingPolicy; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java index 207d789fb22e..bb5f967ba80d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java @@ -32,8 +32,6 @@ public final class VolumeChoosingPolicyFactory { private static final Class DEFAULT_VOLUME_CHOOSING_POLICY = CapacityVolumeChoosingPolicy.class; - private static volatile VolumeChoosingPolicy policyInstance; - private VolumeChoosingPolicyFactory() { } @@ -43,24 +41,4 @@ public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf) DEFAULT_VOLUME_CHOOSING_POLICY, VolumeChoosingPolicy.class) .newInstance(); } - - /** - * Get a shared singleton instance of VolumeChoosingPolicy. - * This method ensures that only one instance of VolumeChoosingPolicy - * is created and shared across the application. 
- * - * @param conf Configuration - * @return The shared VolumeChoosingPolicy instance - */ - public static VolumeChoosingPolicy getSharedPolicy(ConfigurationSource conf) - throws InstantiationException, IllegalAccessException { - if (policyInstance == null) { - synchronized (VolumeChoosingPolicyFactory.class) { - if (policyInstance == null) { - policyInstance = getPolicy(conf); - } - } - } - return policyInstance; - } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 2c8db66136ee..7b9d9a395669 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -115,7 +115,6 @@ import org.apache.hadoop.ozone.container.common.utils.ContainerLogger; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; @@ -155,15 +154,18 @@ public KeyValueHandler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volSet, + VolumeChoosingPolicy volumeChoosingPolicy, ContainerMetrics metrics, IncrementalReportSender icrSender) { - this(config, datanodeId, contSet, volSet, metrics, icrSender, Clock.systemUTC()); + this(config, datanodeId, contSet, volSet, volumeChoosingPolicy, metrics, icrSender, Clock.systemUTC()); } + @SuppressWarnings("checkstyle:ParameterNumber") public KeyValueHandler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volSet, + VolumeChoosingPolicy volumeChoosingPolicy, ContainerMetrics metrics, IncrementalReportSender icrSender, Clock clock) { @@ -174,11 +176,7 @@ public KeyValueHandler(ConfigurationSource config, DatanodeConfiguration.class).isChunkDataValidationCheck(); chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, volSet); - try { - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getSharedPolicy(conf); - } catch (Exception e) { - throw new RuntimeException(e); - } + this.volumeChoosingPolicy = volumeChoosingPolicy; maxContainerSize = (long) config.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, @@ -216,11 +214,6 @@ public KeyValueHandler(ConfigurationSource config, } } - @VisibleForTesting - public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() { - return volumeChoosingPolicy; - } - @Override public StateMachine.DataChannel getStreamDataChannel( Container container, ContainerCommandRequestProto msg) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index b3fa5133823e..2ec47227a76e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -73,6 +73,7 @@ import 
org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -151,7 +152,8 @@ enum InitializingStatus { public OzoneContainer(HddsDatanodeService hddsDatanodeService, DatanodeDetails datanodeDetails, ConfigurationSource conf, StateContext context, CertificateClient certClient, - SecretKeyVerifierClient secretKeyClient) throws IOException { + SecretKeyVerifierClient secretKeyClient, + VolumeChoosingPolicy volumeChoosingPolicy) throws IOException { config = conf; this.datanodeDetails = datanodeDetails; this.context = context; @@ -213,7 +215,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, Handler.getHandlerForContainerType( containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), - containerSet, volumeSet, metrics, icrSender)); + containerSet, volumeSet, volumeChoosingPolicy, metrics, icrSender)); } SecurityConfig secConf = new SecurityConfig(conf); @@ -238,7 +240,7 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, secConf, certClient, new ContainerImporter(conf, containerSet, controller, - volumeSet), + volumeSet, volumeChoosingPolicy), datanodeDetails.threadNamePrefix()); readChannel = new XceiverServerGrpc( @@ -298,8 +300,9 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, @VisibleForTesting public OzoneContainer( DatanodeDetails datanodeDetails, ConfigurationSource conf, - StateContext context) throws IOException { - this(null, datanodeDetails, conf, context, null, null); + StateContext context, VolumeChoosingPolicy volumeChoosingPolicy) + throws IOException { + this(null, datanodeDetails, conf, context, null, null, volumeChoosingPolicy); } public GrpcTlsConfig getTlsClientConfig() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 21670e2df47f..f286685db430 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; @@ -70,15 +69,12 @@ public class ContainerImporter { public ContainerImporter(@Nonnull ConfigurationSource conf, @Nonnull ContainerSet containerSet, @Nonnull ContainerController controller, - @Nonnull MutableVolumeSet volumeSet) { + @Nonnull MutableVolumeSet volumeSet, + @Nonnull VolumeChoosingPolicy volumeChoosingPolicy) { this.containerSet 
= containerSet; this.controller = controller; this.volumeSet = volumeSet; - try { - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getSharedPolicy(conf); - } catch (Exception e) { - throw new RuntimeException(e); - } + this.volumeChoosingPolicy = volumeChoosingPolicy; containerSize = (long) conf.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 0ee12be72363..1a3893990956 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -44,12 +44,14 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.states.DatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; +import org.apache.hadoop.ozone.container.common.volume.CapacityVolumeChoosingPolicy; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; @@ -204,6 +206,8 @@ public void testDatanodeStateContext() throws IOException, ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(datanodeDetails, conf)) { + VolumeChoosingPolicy volumeChoosingPolicy = stateMachine.getVolumeChoosingPolicy(); + assertEquals(CapacityVolumeChoosingPolicy.class, volumeChoosingPolicy.getClass()); DatanodeStateMachine.DatanodeStates currentState = stateMachine.getContext().getState(); assertEquals(DatanodeStateMachine.DatanodeStates.INIT, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index b16d1e135cb5..726184acb0fd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.container.keyvalue; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; @@ -26,7 +25,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import 
static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doCallRealMethod; import static org.mockito.Mockito.mock; @@ -63,12 +61,14 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; @@ -94,8 +94,10 @@ public class TestKeyValueHandler { private HddsDispatcher dispatcher; private KeyValueHandler handler; + private VolumeChoosingPolicy volumeChoosingPolicy; + @BeforeEach - public void setup() throws StorageContainerException { + public void setup() throws StorageContainerException, IllegalAccessException, InstantiationException { // Create mock HddsDispatcher and KeyValueHandler. handler = mock(KeyValueHandler.class); @@ -112,6 +114,7 @@ public void setup() throws StorageContainerException { mock(TokenVerifier.class) ); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } /** @@ -283,33 +286,10 @@ public void testVolumeSetInKeyValueHandler() throws Exception { StateContext context = ContainerTestUtils.getMockContext( datanodeDetails, conf); - //Set a class which is not of sub class of VolumeChoosingPolicy - conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); - RuntimeException exception = assertThrows(RuntimeException.class, - () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, - metrics, c -> { - })); - - assertThat(exception).hasMessageEndingWith( - "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher " + - "not org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy"); - - conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - "org.apache.hadoop.ozone.container.common.volume.CapacityVolumeChoosingPolicy"); - KeyValueHandler keyValueHandler = new KeyValueHandler(conf, - context.getParent().getDatanodeDetails().getUuidString(), cset, - volumeSet, metrics, c -> { - }); - assertEquals("org.apache.hadoop.ozone.container.common" + - ".volume.CapacityVolumeChoosingPolicy", - keyValueHandler.getVolumeChoosingPolicyForTesting() - .getClass().getName()); - // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK. 
conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK"); new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, - metrics, c -> { }); + volumeChoosingPolicy, metrics, c -> { }); assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK, conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK)); } finally { @@ -396,7 +376,7 @@ public void testDeleteContainer() throws IOException { final AtomicInteger icrReceived = new AtomicInteger(0); final KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, metrics, + datanodeId, containerSet, volumeSet, volumeChoosingPolicy, metrics, c -> icrReceived.incrementAndGet()); kvHandler.setClusterID(clusterId); @@ -497,7 +477,7 @@ public void testDeleteContainerTimeout() throws IOException { final AtomicInteger icrReceived = new AtomicInteger(0); final KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, metrics, + datanodeId, containerSet, volumeSet, volumeChoosingPolicy, metrics, c -> icrReceived.incrementAndGet(), clock); kvHandler.setClusterID(clusterId); From fa003823eb4399f1a2ac2fea287b028030bb2209 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Tue, 18 Mar 2025 18:06:35 +0800 Subject: [PATCH 04/18] Adjust others to fit new interface of the volumeChoosingPolicy place change --- .../container/common/ContainerTestUtils.java | 6 +++-- .../common/TestBlockDeletingService.java | 22 +++++++++++-------- .../TestSchemaOneBackwardsCompatibility.java | 8 +++++-- .../TestSchemaTwoBackwardsCompatibility.java | 9 +++++--- .../common/impl/TestContainerPersistence.java | 2 +- .../common/impl/TestHddsDispatcher.java | 16 +++++++++++--- .../common/interfaces/TestHandler.java | 4 +++- .../TestDeleteBlocksCommandHandler.java | 2 +- ...KeyValueHandlerWithUnhealthyContainer.java | 8 ++++++- .../replication/TestContainerImporter.java | 12 ++++++---- .../TestGrpcReplicationService.java | 5 ++++- .../TestReplicationSupervisor.java | 11 +++++++--- .../TestSendContainerRequestHandler.java | 9 ++++++-- .../ozone/container/common/TestEndPoint.java | 22 ++++++++++++++----- .../client/rpc/TestECKeyOutputStream.java | 3 ++- .../metrics/TestContainerMetrics.java | 9 +++++--- .../ozoneimpl/TestOzoneContainerWithTLS.java | 5 ++++- .../ozoneimpl/TestSecureOzoneContainer.java | 6 ++++- .../container/server/TestContainerServer.java | 8 +++++-- .../server/TestSecureContainerServer.java | 7 ++++-- .../datanode/container/ContainerCommands.java | 6 ++++- .../datanode/container/InspectSubcommand.java | 2 +- .../freon/ClosedContainerReplicator.java | 8 +++++-- 23 files changed, 137 insertions(+), 53 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index e33a4c4a3ce5..6a258e237d6d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -66,6 +66,7 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import 
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; @@ -128,9 +129,10 @@ public static EndpointStateMachine createEndpoint(Configuration conf, public static OzoneContainer getOzoneContainer( DatanodeDetails datanodeDetails, OzoneConfiguration conf) - throws IOException { + throws IOException, InstantiationException, IllegalAccessException { StateContext context = getMockContext(datanodeDetails, conf); - return new OzoneContainer(datanodeDetails, conf, context); + VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); + return new OzoneContainer(datanodeDetails, conf, context, volumeChoosingPolicy); } public static StateContext getMockContext(DatanodeDetails datanodeDetails, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 4e8ad609f2b5..000429b84701 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -84,10 +84,12 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -122,6 +124,7 @@ public class TestBlockDeletingService { private String scmId; private String datanodeUuid; private final OzoneConfiguration conf = new OzoneConfiguration(); + private VolumeChoosingPolicy volumeChoosingPolicy; private ContainerLayoutVersion layout; private String schemaVersion; @@ -129,7 +132,7 @@ public class TestBlockDeletingService { private MutableVolumeSet volumeSet; @BeforeEach - public void init() throws IOException { + public void init() throws Exception { CodecBuffer.enableLeakDetection(); scmId = UUID.randomUUID().toString(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); @@ -138,6 +141,7 @@ public void init() throws IOException { volumeSet = new MutableVolumeSet(datanodeUuid, scmId, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); createDbInstancesForTestIfNeeded(volumeSet, scmId, scmId, conf); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @AfterEach @@ -470,7 +474,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, 
volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); OzoneContainer ozoneContainer = mockDependencies(containerSet, keyValueHandler); @@ -539,7 +543,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -668,7 +672,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -774,7 +778,7 @@ public void testShutdownService(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -804,7 +808,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); // set timeout value as 1ns to trigger timeout behavior long timeout = 1; @@ -911,7 +915,7 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -970,7 +974,7 @@ public void testContainerMaxLockHoldingTime( chunksPerBlock); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - ContainerMetrics.create(conf), c -> { + volumeChoosingPolicy, ContainerMetrics.create(conf), c -> { }); BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -1029,7 +1033,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); int containerCount = 5; int blocksPerContainer = 3; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index f92c89480a70..bb8883eaeefd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -54,8 +54,10 @@ import 
org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; @@ -90,6 +92,7 @@ */ public class TestSchemaOneBackwardsCompatibility { private OzoneConfiguration conf; + private VolumeChoosingPolicy volumeChoosingPolicy; private File metadataDir; private File containerFile; @@ -129,6 +132,7 @@ private void setup(String schemaVersion) throws Exception { metadataDir.getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metadataDir.getAbsolutePath()); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @AfterEach @@ -279,7 +283,7 @@ public void testDelete(String schemaVersion) throws Exception { ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); long initialTotalSpace = newKvData().getBytesUsed(); long blockSpace = initialTotalSpace / TestDB.KEY_COUNT; @@ -352,7 +356,7 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - metrics, c -> { + volumeChoosingPolicy, metrics, c -> { }); KeyValueContainerData cData = newKvData(); try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 1dcc3be6c337..959143b92c22 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -26,7 +26,7 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.Mockito.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -58,9 +58,11 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import 
org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; @@ -105,7 +107,7 @@ public class TestSchemaTwoBackwardsCompatibility { private ContainerSet containerSet; private KeyValueHandler keyValueHandler; private OzoneContainer ozoneContainer; - + private VolumeChoosingPolicy volumeChoosingPolicy; private static final int BLOCKS_PER_CONTAINER = 6; private static final int CHUNKS_PER_BLOCK = 2; private static final int DELETE_TXNS_PER_CONTAINER = 2; @@ -128,13 +130,14 @@ public void setup() throws Exception { volumeSet = new MutableVolumeSet(datanodeUuid, clusterID, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); blockManager = new BlockManagerImpl(conf); chunkManager = new FilePerBlockStrategy(true, blockManager); containerSet = newContainerSet(); keyValueHandler = new KeyValueHandler(conf, datanodeUuid, - containerSet, volumeSet, ContainerMetrics.create(conf), c -> { }); + containerSet, volumeSet, volumeChoosingPolicy, ContainerMetrics.create(conf), c -> { }); ozoneContainer = mock(OzoneContainer.class); when(ozoneContainer.getContainerSet()).thenReturn(containerSet); when(ozoneContainer.getWriteChannel()).thenReturn(null); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 0a31b9746d57..09beb4499409 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -310,7 +310,7 @@ public void testDeleteNonEmptyContainer(ContainerTestVersionInfo versionInfo) AtomicInteger icrReceived = new AtomicInteger(0); KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, metrics, + datanodeId, containerSet, volumeSet, volumeChoosingPolicy, metrics, c -> icrReceived.incrementAndGet()); Exception exception = assertThrows( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 257c104ce79c..b708ebe5ca72 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -77,6 +77,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -88,6 +89,7 @@ 
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; @@ -95,6 +97,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; @@ -112,10 +115,17 @@ public class TestHddsDispatcher { @TempDir private File testDir; + private static VolumeChoosingPolicy volumeChoosingPolicy; + public static final IncrementalReportSender NO_OP_ICR_SENDER = c -> { }; + @BeforeEach + public void setUp() throws Exception { + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); + } + @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenFull( ContainerLayoutVersion layout) throws IOException { @@ -148,7 +158,7 @@ public void testContainerCloseActionWhenFull( handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), - containerSet, volumeSet, metrics, NO_OP_ICR_SENDER)); + containerSet, volumeSet, volumeChoosingPolicy, metrics, NO_OP_ICR_SENDER)); } HddsDispatcher hddsDispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, null); @@ -284,7 +294,7 @@ public void testContainerCloseActionWhenVolumeFull( handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), - containerSet, volumeSet, metrics, NO_OP_ICR_SENDER)); + containerSet, volumeSet, volumeChoosingPolicy, metrics, NO_OP_ICR_SENDER)); } HddsDispatcher hddsDispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, null); @@ -533,7 +543,7 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), - containerSet, volumeSet, metrics, NO_OP_ICR_SENDER)); + containerSet, volumeSet, volumeChoosingPolicy, metrics, NO_OP_ICR_SENDER)); } final HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java index 708e109c0e6b..829f8c55ee50 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import 
org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; import org.junit.jupiter.api.AfterEach; @@ -56,6 +57,7 @@ public void setup() throws Exception { this.conf = new OzoneConfiguration(); this.containerSet = mock(ContainerSet.class); this.volumeSet = mock(MutableVolumeSet.class); + VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); StateContext context = ContainerTestUtils.getMockContext( datanodeDetails, conf); @@ -67,7 +69,7 @@ public void setup() throws Exception { Handler.getHandlerForContainerType( containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), - containerSet, volumeSet, metrics, + containerSet, volumeSet, volumeChoosingPolicy, metrics, TestHddsDispatcher.NO_OP_ICR_SENDER)); } this.dispatcher = new HddsDispatcher( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index fe3872e2dbde..37a22016e5c6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -298,7 +298,7 @@ public void testDeleteCmdWorkerInterval( @Test public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull() - throws IOException { + throws IOException, IllegalAccessException, InstantiationException { int blockDeleteQueueLimit = 5; // Setting up the test environment OzoneConfiguration configuration = new OzoneConfiguration(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index de40c83d0e28..01915319b71c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -50,10 +50,12 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -76,9 +78,12 @@ public class TestKeyValueHandlerWithUnhealthyContainer { private 
IncrementalReportSender mockIcrSender; + private VolumeChoosingPolicy volumeChoosingPolicy; + @BeforeEach - public void init() { + public void init() throws Exception { mockIcrSender = mock(IncrementalReportSender.class); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } @Test @@ -255,6 +260,7 @@ private KeyValueHandler getDummyHandler() { stateMachine.getDatanodeDetails().getUuidString(), mock(ContainerSet.class), mock(MutableVolumeSet.class), + volumeChoosingPolicy, mock(ContainerMetrics.class), mockIcrSender); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java index c283d5a9840b..190125bf8371 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java @@ -51,8 +51,10 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; @@ -72,11 +74,13 @@ class TestContainerImporter { private File tempDir; private OzoneConfiguration conf; + private VolumeChoosingPolicy volumeChoosingPolicy; @BeforeEach - void setup() { + void setup() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath()); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @Test @@ -93,7 +97,7 @@ void importSameContainerWhenAlreadyImport() throws Exception { MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, - containerSet, controllerMock, volumeSet); + containerSet, controllerMock, volumeSet, volumeChoosingPolicy); File tarFile = new File("dummy.tar"); // second import should fail immediately StorageContainerException ex = assertThrows(StorageContainerException.class, @@ -123,7 +127,7 @@ void importSameContainerWhenFirstInProgress() throws Exception { MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, - containerSet, controllerMock, volumeSet); + containerSet, controllerMock, volumeSet, volumeChoosingPolicy); // run import async first time having delay File tarFile = containerTarFile(containerId, containerData); CompletableFuture.runAsync(() -> { @@ -162,7 +166,7 @@ public void testInconsistentChecksumContainerShouldThrowError() throws Exception MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter 
containerImporter = spy(new ContainerImporter(conf, - containerSet, controllerMock, volumeSet)); + containerSet, controllerMock, volumeSet, volumeChoosingPolicy)); TarContainerPacker packer = mock(TarContainerPacker.class); when(packer.unpackContainerDescriptor(any())).thenReturn("test".getBytes( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index 9822c07068d5..28d372fd18c6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -52,10 +52,12 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; @@ -124,11 +126,12 @@ public void init() throws Exception { MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList( new HddsVolume.Builder(testDir).conf(conf).build())); + VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Handler containerHandler = new KeyValueHandler(conf, datanode.getUuidString(), containerSet, - volumeSet, metrics, c -> { + volumeSet, volumeChoosingPolicy, metrics, c -> { }); containerController = new ContainerController(containerSet, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index b6517f4fead9..700828c2dfa3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -72,11 +72,13 @@ import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import 
org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCommandInfo; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinatorTask; @@ -123,6 +125,8 @@ public class TestReplicationSupervisor { private TestClock clock; private DatanodeDetails datanode; + private VolumeChoosingPolicy volumeChoosingPolicy; + @BeforeEach public void setUp() throws Exception { clock = new TestClock(Instant.now(), ZoneId.systemDefault()); @@ -135,6 +139,7 @@ public void setUp() throws Exception { context.setTermOfLeaderSCM(CURRENT_TERM); datanode = MockDatanodeDetails.randomDatanodeDetails(); when(stateMachine.getDatanodeDetails()).thenReturn(datanode); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } @AfterEach @@ -286,7 +291,7 @@ public void slowDownload() { @ContainerLayoutTestInfo.ContainerTest public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout, - @TempDir File tempFile) throws IOException { + @TempDir File tempFile) throws IOException, InstantiationException, IllegalAccessException { OzoneConfiguration conf = new OzoneConfiguration(); ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder() @@ -312,7 +317,7 @@ public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout ContainerController mockedCC = mock(ContainerController.class); ContainerImporter importer = - new ContainerImporter(conf, set, mockedCC, volumeSet); + new ContainerImporter(conf, set, mockedCC, volumeSet, volumeChoosingPolicy); ContainerReplicator replicator = new DownloadAndImportReplicator(conf, set, importer, moc); @@ -431,7 +436,7 @@ public void testMultipleReplication(ContainerLayoutVersion layout, when(volumeSet.getVolumesList()).thenReturn(singletonList( new HddsVolume.Builder(testDir).conf(conf).build())); ContainerController mockedCC = mock(ContainerController.class); - ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet); + ContainerImporter importer = new ContainerImporter(conf, set, mockedCC, volumeSet, volumeChoosingPolicy); ContainerReplicator replicator = new DownloadAndImportReplicator( conf, set, importer, moc); replicatorRef.set(replicator); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index 226cb07c0cb2..d6c351a69e63 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -32,8 +32,10 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import 
org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; @@ -53,10 +55,13 @@ class TestSendContainerRequestHandler { private OzoneConfiguration conf; + private VolumeChoosingPolicy volumeChoosingPolicy; + @BeforeEach - void setup() { + void setup() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath()); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @Test @@ -67,7 +72,7 @@ void testReceiveDataForExistingContainer() throws Exception { MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, - newContainerSet(0), mock(ContainerController.class), volumeSet); + newContainerSet(0), mock(ContainerController.class), volumeSet, volumeChoosingPolicy); KeyValueContainerData containerData = new KeyValueContainerData(containerId, ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test"); // add container to container set diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 64f903f45057..8ef99cd321ab 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -63,6 +63,7 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -73,6 +74,7 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; @@ -96,6 +98,7 @@ public class TestEndPoint { @TempDir private static File testDir; private static OzoneConfiguration ozoneConf; + private static VolumeChoosingPolicy volumeChoosingPolicy; private static DatanodeLayoutStorage layoutStorage; private static DatanodeDetails dnDetails; @@ -121,6 +124,7 @@ static void setUp() throws Exception { layoutStorage.initialize(); scmServer = SCMTestUtils.startScmRpcServer(ozoneConf, scmServerImpl, serverAddress, 10); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(ozoneConf); } /** @@ -155,7 +159,8 @@ public void testGetVersionTask() throws Exception { ozoneConf.setBoolean( OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new 
OzoneContainer(dnDetails, - ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); + ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf), + volumeChoosingPolicy); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, ozoneConf, ozoneContainer); @@ -230,7 +235,8 @@ public void testCheckVersionResponse() throws Exception { GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer .captureLogs(VersionEndpointTask.LOG); OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, - ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); + ContainerTestUtils.getMockContext(dnDetails, ozoneConf), + volumeChoosingPolicy); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, ozoneConf, ozoneContainer); @@ -273,7 +279,8 @@ public void testDnLayoutVersionFile() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, - ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); + ContainerTestUtils.getMockContext(dnDetails, ozoneConf), + volumeChoosingPolicy); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, ozoneConf, ozoneContainer); @@ -328,7 +335,8 @@ public void testGetVersionToInvalidEndpoint() throws Exception { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); DatanodeDetails datanodeDetails = randomDatanodeDetails(); OzoneContainer ozoneContainer = new OzoneContainer(datanodeDetails, - conf, ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf)); + conf, ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf), + volumeChoosingPolicy); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer); EndpointStateMachine.EndPointStates newState = versionTask.call(); @@ -355,7 +363,8 @@ public void testGetVersionAssertRpcTimeOut() throws Exception { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); DatanodeDetails datanodeDetails = randomDatanodeDetails(); OzoneContainer ozoneContainer = new OzoneContainer(datanodeDetails, conf, - ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf)); + ContainerTestUtils.getMockContext(datanodeDetails, ozoneConf), + volumeChoosingPolicy); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer); @@ -648,7 +657,8 @@ public void testHeartbeatTaskRpcTimeOut() throws Exception { private OzoneContainer createVolume(OzoneConfiguration conf) throws IOException { OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, conf, - ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); + ContainerTestUtils.getMockContext(dnDetails, ozoneConf), + volumeChoosingPolicy); String clusterId = scmServerImpl.getClusterId(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java index 2bc5947a7000..4a03bbad8a17 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestECKeyOutputStream.java @@ 
-198,7 +198,8 @@ public void testECKeyCreatetWithDatanodeIdChange() OzoneClient client1 = null; try (MockedStatic mockedHandler = Mockito.mockStatic(Handler.class, Mockito.CALLS_REAL_METHODS)) { Map handlers = new HashMap<>(); - mockedHandler.when(() -> Handler.getHandlerForContainerType(any(), any(), any(), any(), any(), any(), any())) + mockedHandler.when(() -> Handler.getHandlerForContainerType(any(), any(), any(), any(), any(), any(), any(), + any())) .thenAnswer(i -> { Handler handler = Mockito.spy((Handler) i.callRealMethod()); handlers.put(handler.getDatanodeId(), handler); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index a5fea3dfd7a0..91204b1e1f00 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -55,6 +55,7 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -63,6 +64,7 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ratis.util.function.CheckedBiConsumer; @@ -86,15 +88,16 @@ public class TestContainerMetrics { private Path tempDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static final int DFS_METRICS_PERCENTILES_INTERVALS = 1; + private static VolumeChoosingPolicy volumeChoosingPolicy; @BeforeAll - public static void setup() { + public static void setup() throws Exception { DefaultMetricsSystem.setMiniClusterMode(true); CONF.setInt(HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY, DFS_METRICS_PERCENTILES_INTERVALS); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); CONF.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); - + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(CONF); } @AfterEach @@ -143,7 +146,7 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, VolumeSet volumeSet) handlers.put(containerType, Handler.getHandlerForContainerType(containerType, CONF, context.getParent().getDatanodeDetails().getUuidString(), - containerSet, volumeSet, metrics, + containerSet, volumeSet, volumeChoosingPolicy, metrics, c -> { })); } HddsDispatcher dispatcher = new HddsDispatcher(CONF, containerSet, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 62214ca2144e..0ebdad9299e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -77,9 +77,11 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -308,8 +310,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() { OzoneContainer container = null; try { StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf); + VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); container = new OzoneContainer( - null, dn, conf, stateContext, caClient, keyClient); + null, dn, conf, stateContext, caClient, keyClient, volumeChoosingPolicy); MutableVolumeSet volumeSet = container.getVolumeSet(); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index e3c3b53a22d1..b02c6bf39142 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -54,8 +54,10 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ratis.util.ExitUtils; @@ -83,6 +85,7 @@ class TestSecureOzoneContainer { private Path ozoneMetaPath; private OzoneConfiguration conf; + private VolumeChoosingPolicy volumeChoosingPolicy; private CertificateClientTestImpl caClient; private SecretKeyClient secretKeyClient; private ContainerTokenSecretManager secretManager; @@ -111,6 +114,7 @@ void setup() throws Exception { secretKeyClient = new SecretKeyTestClient(); secretManager = new ContainerTokenSecretManager( TimeUnit.DAYS.toMillis(1), secretKeyClient); + 
volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @ParameterizedTest @@ -134,7 +138,7 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); container = new OzoneContainer(null, dn, conf, ContainerTestUtils - .getMockContext(dn, conf), caClient, secretKeyClient); + .getMockContext(dn, conf), caClient, secretKeyClient, volumeChoosingPolicy); MutableVolumeSet volumeSet = container.getVolumeSet(); StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index fe83aa0881ca..afe3fc4e1f18 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -66,6 +67,7 @@ import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ratis.rpc.RpcType; @@ -84,17 +86,19 @@ public class TestContainerServer { private static Path testDir; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; + private static VolumeChoosingPolicy volumeChoosingPolicy; @TempDir private Path tempDir; @BeforeAll - public static void setup() { + public static void setup() throws Exception { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, testDir.toString()); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(CONF); } @AfterAll @@ -204,7 +208,7 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), - containerSet, volumeSet, metrics, + containerSet, volumeSet, volumeChoosingPolicy, metrics, c -> { })); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index f78971af4f50..be048e65b361 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -89,6 +89,7 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -96,6 +97,7 @@ import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.security.token.Token; @@ -121,7 +123,7 @@ public class TestSecureContainerServer { private static SecretKeyClient secretKeyClient; private static OzoneBlockTokenSecretManager blockTokenSecretManager; private static ContainerTokenSecretManager containerTokenSecretManager; - + private static VolumeChoosingPolicy volumeChoosingPolicy; @BeforeAll public static void setup() throws Exception { DefaultMetricsSystem.setMiniClusterMode(true); @@ -131,6 +133,7 @@ public static void setup() throws Exception { CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); caClient = new CertificateClientTestImpl(CONF); secretKeyClient = new SecretKeyTestClient(); + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(CONF); long tokenLifetime = TimeUnit.HOURS.toMillis(1); @@ -179,7 +182,7 @@ private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, handlers.put(containerType, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), - containerSet, volumeSet, metrics, + containerSet, volumeSet, volumeChoosingPolicy, metrics, c -> { })); } HddsDispatcher hddsDispatcher = new HddsDispatcher( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java index 7945c3bbfbf9..cf25a656f3b5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java @@ -45,11 +45,13 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import 
org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; @@ -81,7 +83,7 @@ public class ContainerCommands extends AbstractSubcommand { private ContainerController controller; - public void loadContainersFromVolumes() throws IOException { + public void loadContainersFromVolumes() throws IOException, IllegalAccessException, InstantiationException { OzoneConfiguration conf = getOzoneConf(); ContainerSet containerSet = ContainerSet.newReadOnlyContainerSet(1000); @@ -96,6 +98,7 @@ public void loadContainersFromVolumes() throws IOException { volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) { MutableVolumeSet dbVolumeSet = @@ -118,6 +121,7 @@ public void loadContainersFromVolumes() throws IOException { datanodeUuid, containerSet, volumeSet, + volumeChoosingPolicy, metrics, containerReplicaProto -> { }); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java index 5db6380d54df..a3ecc4b7c0fe 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java @@ -44,7 +44,7 @@ public class InspectSubcommand extends AbstractSubcommand implements Callable { }); @@ -236,7 +240,7 @@ private void initializeReplicationSupervisor( new ContainerController(containerSet, handlers); ContainerImporter importer = new ContainerImporter(conf, containerSet, - controller, volumeSet); + controller, volumeSet, volumeChoosingPolicy); replicator = new DownloadAndImportReplicator(conf, containerSet, importer, new SimpleContainerDownloader(conf, null)); From 71aacee3634d8891829f48be98f609eb3ee02518 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Wed, 19 Mar 2025 02:38:16 +0800 Subject: [PATCH 05/18] Convert reflection-related exceptions in getPolicy instead of its callers. 
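
With getPolicy() wrapping the reflective instantiation itself, callers no longer
need a throws clause or a try/catch for InstantiationException and
IllegalAccessException. A minimal illustration of the pattern follows; the class
and method names below (PolicyFactorySketch, ChoosingPolicy, DefaultPolicy,
create, createChecked) are hypothetical and are not the classes touched by this
patch.

// Illustrative sketch only: hypothetical names, not the patched Ozone classes.
public final class PolicyFactorySketch {

  // Stand-in for a policy interface whose implementation class is configurable.
  public interface ChoosingPolicy {
    String name();
  }

  public static final class DefaultPolicy implements ChoosingPolicy {
    @Override
    public String name() {
      return "default";
    }
  }

  private PolicyFactorySketch() {
  }

  // Before: the checked reflection exceptions leak into every caller's signature.
  public static ChoosingPolicy createChecked(Class<? extends ChoosingPolicy> clazz)
      throws InstantiationException, IllegalAccessException {
    return clazz.newInstance();
  }

  // After: the factory converts reflection failures into an unchecked exception,
  // so call sites simply invoke create(...) with no throws clause or try/catch.
  public static ChoosingPolicy create(Class<? extends ChoosingPolicy> clazz) {
    try {
      return clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new IllegalStateException("Failed to instantiate " + clazz.getName(), e);
    }
  }

  public static void main(String[] args) {
    // No exception handling needed at the call site.
    ChoosingPolicy policy = create(DefaultPolicy.class);
    System.out.println(policy.name());
  }
}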
--- .../common/statemachine/DatanodeStateMachine.java | 6 +----- .../common/volume/VolumeChoosingPolicyFactory.java | 11 ++++++----- .../ozone/container/common/ContainerTestUtils.java | 2 +- .../container/common/TestBlockDeletingService.java | 2 +- .../container/common/impl/TestHddsDispatcher.java | 6 +++--- .../TestDeleteBlocksCommandHandler.java | 2 +- .../ozone/container/keyvalue/TestKeyValueHandler.java | 2 +- .../TestKeyValueHandlerWithUnhealthyContainer.java | 2 +- .../container/replication/TestContainerImporter.java | 2 +- .../replication/TestReplicationSupervisor.java | 2 +- .../replication/TestSendContainerRequestHandler.java | 2 +- .../ozone/container/metrics/TestContainerMetrics.java | 2 +- .../ozone/container/server/TestContainerServer.java | 2 +- .../debug/datanode/container/ContainerCommands.java | 2 +- .../debug/datanode/container/InspectSubcommand.java | 2 +- .../hadoop/ozone/freon/ClosedContainerReplicator.java | 2 +- 16 files changed, 23 insertions(+), 26 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index ee53ff3290ff..eb1cf0f3c124 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -177,11 +177,7 @@ public DatanodeStateMachine(HddsDatanodeService hddsDatanodeService, connectionManager = new SCMConnectionManager(conf); context = new StateContext(this.conf, DatanodeStates.getInitState(), this, threadNamePrefix); - try { - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); - } catch (Exception e) { - throw new RuntimeException(e); - } + volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); // OzoneContainer instance is used in a non-thread safe way by the context // past to its constructor, so we much synchronize its access. See // HDDS-3116 for more details. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java index bb5f967ba80d..28ad10a15872 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeChoosingPolicyFactory.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; +import org.apache.ratis.util.ReflectionUtils; /** * A factory to create volume choosing policy instance based on configuration @@ -35,10 +36,10 @@ public final class VolumeChoosingPolicyFactory { private VolumeChoosingPolicyFactory() { } - public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf) - throws InstantiationException, IllegalAccessException { - return conf.getClass(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - DEFAULT_VOLUME_CHOOSING_POLICY, VolumeChoosingPolicy.class) - .newInstance(); + public static VolumeChoosingPolicy getPolicy(ConfigurationSource conf) { + Class policyClass = conf.getClass( + HDDS_DATANODE_VOLUME_CHOOSING_POLICY, + DEFAULT_VOLUME_CHOOSING_POLICY, VolumeChoosingPolicy.class); + return ReflectionUtils.newInstance(policyClass); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 477e065c2e76..33c188c6d7fd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -131,7 +131,7 @@ public static EndpointStateMachine createEndpoint(Configuration conf, public static OzoneContainer getOzoneContainer( DatanodeDetails datanodeDetails, OzoneConfiguration conf) - throws IOException, InstantiationException, IllegalAccessException { + throws IOException { StateContext context = getMockContext(datanodeDetails, conf); VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); return new OzoneContainer(datanodeDetails, conf, context, volumeChoosingPolicy); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 000429b84701..b79973d73f63 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -132,7 +132,7 @@ public class TestBlockDeletingService { private MutableVolumeSet volumeSet; @BeforeEach - public void init() throws Exception { + public void init() throws IOException { CodecBuffer.enableLeakDetection(); scmId = UUID.randomUUID().toString(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index b708ebe5ca72..8153c662b28a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -97,7 +97,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; @@ -121,8 +121,8 @@ public class TestHddsDispatcher { c -> { }; - @BeforeEach - public void setUp() throws Exception { + @BeforeAll + public static void init() { volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 37a22016e5c6..fe3872e2dbde 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -298,7 +298,7 @@ public void testDeleteCmdWorkerInterval( @Test public void testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull() - throws IOException, IllegalAccessException, InstantiationException { + throws IOException { int blockDeleteQueueLimit = 5; // Setting up the test environment OzoneConfiguration configuration = new OzoneConfiguration(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 726184acb0fd..95da07a42558 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -97,7 +97,7 @@ public class TestKeyValueHandler { private VolumeChoosingPolicy volumeChoosingPolicy; @BeforeEach - public void setup() throws StorageContainerException, IllegalAccessException, InstantiationException { + public void setup() throws StorageContainerException { // Create mock HddsDispatcher and KeyValueHandler. 
handler = mock(KeyValueHandler.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index 01915319b71c..1919291ebcbf 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -81,7 +81,7 @@ public class TestKeyValueHandlerWithUnhealthyContainer { private VolumeChoosingPolicy volumeChoosingPolicy; @BeforeEach - public void init() throws Exception { + public void init() { mockIcrSender = mock(IncrementalReportSender.class); volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java index 190125bf8371..8b9d0d1d6d5f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java @@ -77,7 +77,7 @@ class TestContainerImporter { private VolumeChoosingPolicy volumeChoosingPolicy; @BeforeEach - void setup() throws Exception { + void setup() { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath()); volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 700828c2dfa3..2cf5750c0cb9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -291,7 +291,7 @@ public void slowDownload() { @ContainerLayoutTestInfo.ContainerTest public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout, - @TempDir File tempFile) throws IOException, InstantiationException, IllegalAccessException { + @TempDir File tempFile) throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index d6c351a69e63..0d15e265ad91 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -58,7 +58,7 @@ class TestSendContainerRequestHandler { private VolumeChoosingPolicy volumeChoosingPolicy; @BeforeEach - void setup() 
throws Exception { + void setup() { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath()); volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 91204b1e1f00..fdc2f3feb06c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -91,7 +91,7 @@ public class TestContainerMetrics { private static VolumeChoosingPolicy volumeChoosingPolicy; @BeforeAll - public static void setup() throws Exception { + public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.setInt(HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY, DFS_METRICS_PERCENTILES_INTERVALS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index afe3fc4e1f18..b3fb32f07249 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -91,7 +91,7 @@ public class TestContainerServer { private Path tempDir; @BeforeAll - public static void setup() throws Exception { + public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, testDir.toString()); CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java index cf25a656f3b5..83f4ae359643 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java @@ -83,7 +83,7 @@ public class ContainerCommands extends AbstractSubcommand { private ContainerController controller; - public void loadContainersFromVolumes() throws IOException, IllegalAccessException, InstantiationException { + public void loadContainersFromVolumes() throws IOException { OzoneConfiguration conf = getOzoneConf(); ContainerSet containerSet = ContainerSet.newReadOnlyContainerSet(1000); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java index a3ecc4b7c0fe..5db6380d54df 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java @@ -44,7 +44,7 @@ public class InspectSubcommand extends AbstractSubcommand implements Callable Date: Wed, 19 Mar 2025 02:55:56 +0800 Subject: [PATCH 06/18] Let KeyValueHandler create the policy if they get null instance (for 
tests). --- .../container/common/interfaces/Handler.java | 3 ++- .../container/keyvalue/KeyValueHandler.java | 7 ++++--- .../common/TestBlockDeletingService.java | 20 ++++++++----------- .../TestSchemaOneBackwardsCompatibility.java | 8 ++------ .../TestSchemaTwoBackwardsCompatibility.java | 6 +----- .../common/impl/TestContainerPersistence.java | 2 +- .../keyvalue/TestKeyValueHandler.java | 12 +++-------- ...KeyValueHandlerWithUnhealthyContainer.java | 6 ------ .../TestGrpcReplicationService.java | 5 +---- 9 files changed, 22 insertions(+), 47 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index 63af6ce70f40..2cb757f1a1fc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.time.Clock; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; @@ -78,7 +79,7 @@ public static Handler getHandlerForContainerType( case KeyValueContainer: return new KeyValueHandler(config, datanodeId, contSet, volumeSet, volumeChoosingPolicy, metrics, - icrSender); + icrSender, Clock.systemUTC()); default: throw new IllegalArgumentException("Handler for ContainerType: " + containerType + "doesn't exist."); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 7b9d9a395669..f1d7e893c049 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -115,6 +115,7 @@ import org.apache.hadoop.ozone.container.common.utils.ContainerLogger; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; @@ -154,10 +155,9 @@ public KeyValueHandler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volSet, - VolumeChoosingPolicy volumeChoosingPolicy, ContainerMetrics metrics, IncrementalReportSender icrSender) { - this(config, datanodeId, contSet, volSet, volumeChoosingPolicy, metrics, icrSender, Clock.systemUTC()); + this(config, datanodeId, contSet, volSet, null, metrics, icrSender, Clock.systemUTC()); } @SuppressWarnings("checkstyle:ParameterNumber") @@ -176,7 +176,8 @@ public KeyValueHandler(ConfigurationSource config, DatanodeConfiguration.class).isChunkDataValidationCheck(); chunkManager = ChunkManagerFactory.createChunkManager(config, blockManager, volSet); - this.volumeChoosingPolicy = volumeChoosingPolicy; + 
this.volumeChoosingPolicy = volumeChoosingPolicy != null ? volumeChoosingPolicy + : VolumeChoosingPolicyFactory.getPolicy(config); maxContainerSize = (long) config.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index b79973d73f63..4e8ad609f2b5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -84,12 +84,10 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -124,7 +122,6 @@ public class TestBlockDeletingService { private String scmId; private String datanodeUuid; private final OzoneConfiguration conf = new OzoneConfiguration(); - private VolumeChoosingPolicy volumeChoosingPolicy; private ContainerLayoutVersion layout; private String schemaVersion; @@ -141,7 +138,6 @@ public void init() throws IOException { volumeSet = new MutableVolumeSet(datanodeUuid, scmId, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); createDbInstancesForTestIfNeeded(volumeSet, scmId, scmId, conf); - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @AfterEach @@ -474,7 +470,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); OzoneContainer ozoneContainer = mockDependencies(containerSet, keyValueHandler); @@ -543,7 +539,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -672,7 +668,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -778,7 +774,7 @@ public void 
testShutdownService(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -808,7 +804,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); // set timeout value as 1ns to trigger timeout behavior long timeout = 1; @@ -915,7 +911,7 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -974,7 +970,7 @@ public void testContainerMaxLockHoldingTime( chunksPerBlock); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, ContainerMetrics.create(conf), c -> { + ContainerMetrics.create(conf), c -> { }); BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler); @@ -1033,7 +1029,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); int containerCount = 5; int blocksPerContainer = 3; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index bb8883eaeefd..f92c89480a70 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -54,10 +54,8 @@ import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; @@ -92,7 +90,6 @@ */ public class TestSchemaOneBackwardsCompatibility { private OzoneConfiguration conf; - private VolumeChoosingPolicy volumeChoosingPolicy; private File metadataDir; private File containerFile; @@ -132,7 +129,6 @@ private void setup(String schemaVersion) throws Exception { 
metadataDir.getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metadataDir.getAbsolutePath()); - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); } @AfterEach @@ -283,7 +279,7 @@ public void testDelete(String schemaVersion) throws Exception { ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); long initialTotalSpace = newKvData().getBytesUsed(); long blockSpace = initialTotalSpace / TestDB.KEY_COUNT; @@ -356,7 +352,7 @@ public void testReadDeletedBlockChunkInfo(String schemaVersion) ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, - volumeChoosingPolicy, metrics, c -> { + metrics, c -> { }); KeyValueContainerData cData = newKvData(); try (DBHandle refCountedDB = BlockUtils.getDB(cData, conf)) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 959143b92c22..12771d779358 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -58,11 +58,9 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; @@ -107,7 +105,6 @@ public class TestSchemaTwoBackwardsCompatibility { private ContainerSet containerSet; private KeyValueHandler keyValueHandler; private OzoneContainer ozoneContainer; - private VolumeChoosingPolicy volumeChoosingPolicy; private static final int BLOCKS_PER_CONTAINER = 6; private static final int CHUNKS_PER_BLOCK = 2; private static final int DELETE_TXNS_PER_CONTAINER = 2; @@ -130,14 +127,13 @@ public void setup() throws Exception { volumeSet = new MutableVolumeSet(datanodeUuid, clusterID, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); blockManager = new BlockManagerImpl(conf); chunkManager = new FilePerBlockStrategy(true, blockManager); containerSet = newContainerSet(); keyValueHandler = new KeyValueHandler(conf, datanodeUuid, - containerSet, volumeSet, volumeChoosingPolicy, ContainerMetrics.create(conf), c -> { }); + containerSet, volumeSet, ContainerMetrics.create(conf), c -> { }); ozoneContainer = mock(OzoneContainer.class); when(ozoneContainer.getContainerSet()).thenReturn(containerSet); 
when(ozoneContainer.getWriteChannel()).thenReturn(null); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 09beb4499409..0a31b9746d57 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -310,7 +310,7 @@ public void testDeleteNonEmptyContainer(ContainerTestVersionInfo versionInfo) AtomicInteger icrReceived = new AtomicInteger(0); KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, volumeChoosingPolicy, metrics, + datanodeId, containerSet, volumeSet, metrics, c -> icrReceived.incrementAndGet()); Exception exception = assertThrows( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 95da07a42558..142048f31a28 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -61,14 +61,12 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; @@ -94,8 +92,6 @@ public class TestKeyValueHandler { private HddsDispatcher dispatcher; private KeyValueHandler handler; - private VolumeChoosingPolicy volumeChoosingPolicy; - @BeforeEach public void setup() throws StorageContainerException { // Create mock HddsDispatcher and KeyValueHandler. @@ -113,8 +109,6 @@ public void setup() throws StorageContainerException { mock(ContainerMetrics.class), mock(TokenVerifier.class) ); - - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } /** @@ -289,7 +283,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception { // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK. 
conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK"); new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, - volumeChoosingPolicy, metrics, c -> { }); + metrics, c -> { }); assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK, conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK)); } finally { @@ -376,7 +370,7 @@ public void testDeleteContainer() throws IOException { final AtomicInteger icrReceived = new AtomicInteger(0); final KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, volumeChoosingPolicy, metrics, + datanodeId, containerSet, volumeSet, metrics, c -> icrReceived.incrementAndGet()); kvHandler.setClusterID(clusterId); @@ -477,7 +471,7 @@ public void testDeleteContainerTimeout() throws IOException { final AtomicInteger icrReceived = new AtomicInteger(0); final KeyValueHandler kvHandler = new KeyValueHandler(conf, - datanodeId, containerSet, volumeSet, volumeChoosingPolicy, metrics, + datanodeId, containerSet, volumeSet, null, metrics, c -> icrReceived.incrementAndGet(), clock); kvHandler.setClusterID(clusterId); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index 1919291ebcbf..de40c83d0e28 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -50,12 +50,10 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -78,12 +76,9 @@ public class TestKeyValueHandlerWithUnhealthyContainer { private IncrementalReportSender mockIcrSender; - private VolumeChoosingPolicy volumeChoosingPolicy; - @BeforeEach public void init() { mockIcrSender = mock(IncrementalReportSender.class); - volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(new OzoneConfiguration()); } @Test @@ -260,7 +255,6 @@ private KeyValueHandler getDummyHandler() { stateMachine.getDatanodeDetails().getUuidString(), mock(ContainerSet.class), mock(MutableVolumeSet.class), - volumeChoosingPolicy, mock(ContainerMetrics.class), mockIcrSender); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index 28d372fd18c6..9822c07068d5 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -52,12 +52,10 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; @@ -126,12 +124,11 @@ public void init() throws Exception { MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList( new HddsVolume.Builder(testDir).conf(conf).build())); - VolumeChoosingPolicy volumeChoosingPolicy = VolumeChoosingPolicyFactory.getPolicy(conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Handler containerHandler = new KeyValueHandler(conf, datanode.getUuidString(), containerSet, - volumeSet, volumeChoosingPolicy, metrics, c -> { + volumeSet, metrics, c -> { }); containerController = new ContainerController(containerSet, From 924eb8a0135093ea4943a408e3d3556d8adfc5f6 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Wed, 19 Mar 2025 03:33:03 +0800 Subject: [PATCH 07/18] Add comment for the change in CapacityVolumeChoosingPolicy --- .../container/common/volume/CapacityVolumeChoosingPolicy.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 0b6e86585a1b..2f892d62a41a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -80,6 +80,8 @@ public HddsVolume chooseVolume(List volumes, // 4. vol2 + vol2: 25%, result is vol2 // So we have a total of 75% chances to choose vol1, which meets our // expectation. + + // avoid lock contention and stores the index of the next volume to be returned in each thread. 
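// A minimal, self-contained sketch of the contrast the comment above describes; the
// VolumePickSketch class and pick* method names are illustrative only, not Ozone code.
// A single java.util.Random advances one shared, CAS-updated seed, so contended threads
// retry against each other, while ThreadLocalRandom.current() hands every thread its own
// generator, leaving no shared state to contend on when two indexes are drawn per call.
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

final class VolumePickSketch {
  private static final Random SHARED = new Random();

  // Every caller funnels through the same internal seed of SHARED.
  static int pickWithSharedRandom(int volumeCount) {
    return SHARED.nextInt(volumeCount);
  }

  // Each thread draws from its own generator; same distribution, no contention.
  static int pickWithThreadLocalRandom(int volumeCount) {
    return ThreadLocalRandom.current().nextInt(volumeCount);
  }
}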
ThreadLocalRandom random = ThreadLocalRandom.current(); int firstIndex = random.nextInt(count); int secondIndex = random.nextInt(count); From 31b935ee8b5a55b47f673dd3f9d851006d810c72 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Wed, 19 Mar 2025 22:59:10 +0800 Subject: [PATCH 08/18] Make VolumeChoosingPolicy#chooseVolume method synchronized --- .../container/common/volume/CapacityVolumeChoosingPolicy.java | 2 +- .../container/common/volume/RoundRobinVolumeChoosingPolicy.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 2f892d62a41a..50def6c5ef29 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -45,7 +45,7 @@ public class CapacityVolumeChoosingPolicy implements VolumeChoosingPolicy { CapacityVolumeChoosingPolicy.class); @Override - public HddsVolume chooseVolume(List volumes, + public synchronized HddsVolume chooseVolume(List volumes, long maxContainerSize) throws IOException { // No volumes available to choose from diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java index 1fe1e32f2b66..1a6e5c2b12ed 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java @@ -41,7 +41,7 @@ public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy { private AtomicInteger nextVolumeIndex = new AtomicInteger(0); @Override - public HddsVolume chooseVolume(List volumes, + public synchronized HddsVolume chooseVolume(List volumes, long maxContainerSize) throws IOException { // No volumes available to choose from From 5ead5ebb92c331d1fcb3fac7847c18b6e4e65968 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Thu, 20 Mar 2025 09:29:41 +0800 Subject: [PATCH 09/18] Revert threadLocalRandom --- .../common/volume/CapacityVolumeChoosingPolicy.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 50def6c5ef29..853ed1f19814 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.util.List; -import java.util.concurrent.ThreadLocalRandom; +import java.util.Random; import java.util.stream.Collectors; import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import 
org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; @@ -44,6 +44,8 @@ public class CapacityVolumeChoosingPolicy implements VolumeChoosingPolicy { public static final Logger LOG = LoggerFactory.getLogger( CapacityVolumeChoosingPolicy.class); + // Stores the index of the next volume to be returned. + private final Random random = new Random(); @Override public synchronized HddsVolume chooseVolume(List volumes, long maxContainerSize) throws IOException { @@ -80,9 +82,6 @@ public synchronized HddsVolume chooseVolume(List volumes, // 4. vol2 + vol2: 25%, result is vol2 // So we have a total of 75% chances to choose vol1, which meets our // expectation. - - // avoid lock contention and stores the index of the next volume to be returned in each thread. - ThreadLocalRandom random = ThreadLocalRandom.current(); int firstIndex = random.nextInt(count); int secondIndex = random.nextInt(count); From b1bc0d675d526965210989524b35abb55e196f6d Mon Sep 17 00:00:00 2001 From: peterxcli Date: Fri, 21 Mar 2025 15:23:24 +0800 Subject: [PATCH 10/18] revert unnecessary change --- .../container/common/TestSchemaTwoBackwardsCompatibility.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 12771d779358..1dcc3be6c337 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -26,7 +26,7 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -105,6 +105,7 @@ public class TestSchemaTwoBackwardsCompatibility { private ContainerSet containerSet; private KeyValueHandler keyValueHandler; private OzoneContainer ozoneContainer; + private static final int BLOCKS_PER_CONTAINER = 6; private static final int CHUNKS_PER_BLOCK = 2; private static final int DELETE_TXNS_PER_CONTAINER = 2; From 88028caedabfece6c763ec218c8be76638a20570 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Fri, 21 Mar 2025 15:35:07 +0800 Subject: [PATCH 11/18] Move commit volume space to VolumeChoosingPolicy to make it atomic --- .../container/common/impl/ContainerData.java | 35 ------------------- .../container/common/impl/ContainerSet.java | 2 -- .../volume/CapacityVolumeChoosingPolicy.java | 5 ++- .../RoundRobinVolumeChoosingPolicy.java | 1 + 4 files changed, 5 insertions(+), 38 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index 12c618b226fa..7d2074198572 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -214,16 
+214,6 @@ public synchronized void setState(ContainerDataProto.State state) { (state != oldState)) { releaseCommitSpace(); } - - /** - * commit space when container transitions (back) to Open. - * when? perhaps closing a container threw an exception - */ - if ((state == ContainerDataProto.State.OPEN) && - (state != oldState)) { - Preconditions.checkState(getMaxSize() > 0); - commitSpace(); - } } /** @@ -361,31 +351,6 @@ private void releaseCommitSpace() { committedSpace = false; } - /** - * add available space in the container to the committed space in the volume. - * available space is the number of bytes remaining till max capacity. - */ - public void commitSpace() { - long unused = getMaxSize() - getBytesUsed(); - ContainerDataProto.State myState = getState(); - HddsVolume cVol; - - //we don't expect duplicate calls - Preconditions.checkState(!committedSpace); - - // Only Open Containers have Committed Space - if (myState != ContainerDataProto.State.OPEN) { - return; - } - - // junit tests do not always set up volume - cVol = getVolume(); - if (unused > 0 && (cVol != null)) { - cVol.incCommittedBytes(unused); - committedSpace = true; - } - } - /** * Get the number of bytes read from the container. * @return the number of bytes read from the container. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 22454e60a98c..9e3bef2f6ba0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -152,8 +152,6 @@ private boolean addContainer(Container container, boolean overwrite) throws throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } missingContainerSet.remove(containerId); - // wish we could have done this from ContainerData.setState - container.getContainerData().commitSpace(); if (container.getContainerData().getState() == RECOVERING) { recoveringContainerMap.put( clock.millis() + recoveringTimeout, containerId); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 853ed1f19814..a8742ef3b3ec 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -92,7 +92,10 @@ public synchronized HddsVolume chooseVolume(List volumes, - firstVolume.getCommittedBytes(); long secondAvailable = secondVolume.getCurrentUsage().getAvailable() - secondVolume.getCommittedBytes(); - return firstAvailable < secondAvailable ? secondVolume : firstVolume; + HddsVolume selectedVolume = firstAvailable < secondAvailable ? 
secondVolume : firstVolume; + + selectedVolume.incCommittedBytes(maxContainerSize); + return selectedVolume; } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java index 1a6e5c2b12ed..8edf2bfc124a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java @@ -68,6 +68,7 @@ public synchronized HddsVolume chooseVolume(List volumes, if (hasEnoughSpace) { logIfSomeVolumesOutOfSpace(filter, LOG); nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex); + volume.incCommittedBytes(maxContainerSize); return volume; } From 90740b7e284bdbbc8f2b3260b4e0e53b5115ae2c Mon Sep 17 00:00:00 2001 From: peterxcli Date: Sat, 22 Mar 2025 00:32:29 +0800 Subject: [PATCH 12/18] Set committedSpace for containerData manually after choosing volume --- .../container/common/impl/ContainerData.java | 30 +++++++++++++++++++ .../container/common/impl/ContainerSet.java | 2 ++ .../container/keyvalue/KeyValueContainer.java | 1 + .../replication/ContainerImporter.java | 12 ++++---- .../common/impl/TestContainerPersistence.java | 16 ++++++++-- 5 files changed, 54 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index 7d2074198572..a4af2ed2a0b2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -351,6 +351,32 @@ private void releaseCommitSpace() { committedSpace = false; } + /** + * add available space in the container to the committed space in the volume. + * available space is the number of bytes remaining till max capacity. + */ + public void commitSpace() { + long unused = getMaxSize() - getBytesUsed(); + ContainerDataProto.State myState = getState(); + HddsVolume cVol; + + if (committedSpace) { + return; + } + + // Only Open Containers have Committed Space + if (myState != ContainerDataProto.State.OPEN) { + return; + } + + // junit tests do not always set up volume + cVol = getVolume(); + if (unused > 0 && (cVol != null)) { + cVol.incCommittedBytes(unused); + committedSpace = true; + } + } + /** * Get the number of bytes read from the container. * @return the number of bytes read from the container. @@ -436,6 +462,10 @@ public void setBytesUsed(long used) { this.bytesUsed.set(used); } + public void setCommittedSpace(boolean committed) { + this.committedSpace = committed; + } + /** * Get the number of bytes used by the container. * @return the number of bytes used by the container. 
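// A minimal sketch of the race the committed-space rework in the two patches above is aimed
// at; VolumeStub, chooseAndCommit and the field names are hypothetical, not Ozone classes.
// If a caller first picks the volume with the most (capacity - committed) headroom and only
// later bumps the committed counter, two concurrent creations or imports can both observe the
// same headroom and both land on one nearly-full volume; folding the reservation into the
// shared, synchronized choice makes "check space" and "reserve space" one atomic step.
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

final class VolumeStub {
  final String name;
  final long capacityBytes;
  final AtomicLong committedBytes = new AtomicLong();

  VolumeStub(String name, long capacityBytes) {
    this.name = name;
    this.capacityBytes = capacityBytes;
  }

  long headroom() {
    return capacityBytes - committedBytes.get();
  }
}

final class ChooseAndCommitSketch {
  static synchronized VolumeStub chooseAndCommit(List<VolumeStub> volumes, long containerSize) {
    VolumeStub best = volumes.stream()
        .filter(v -> v.headroom() >= containerSize)
        .max(Comparator.comparingLong(VolumeStub::headroom))
        .orElseThrow(() -> new IllegalStateException("no volume has enough free space"));
    // Reserve inside the same critical section that made the decision.
    best.committedBytes.addAndGet(containerSize);
    return best;
  }
}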
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 9e3bef2f6ba0..22454e60a98c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -152,6 +152,8 @@ private boolean addContainer(Container container, boolean overwrite) throws throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } missingContainerSet.remove(containerId); + // wish we could have done this from ContainerData.setState + container.getContainerData().commitSpace(); if (container.getContainerData().getState() == RECOVERING) { recoveringContainerMap.put( clock.millis() + recoveringTimeout, containerId); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index dc944bcb1ca0..8c7099fef7b0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -169,6 +169,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); // Set volume before getContainerDBFile(), because we may need the // volume to deduce the db file. + containerData.setCommittedSpace(true); containerData.setVolume(containerVolume); long containerID = containerData.getContainerID(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index f286685db430..e9e7129060df 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -105,11 +105,6 @@ public void importContainer(long containerID, Path tarFilePath, ContainerProtos.Result.CONTAINER_EXISTS); } - HddsVolume targetVolume = hddsVolume; - if (targetVolume == null) { - targetVolume = chooseNextVolume(); - } - KeyValueContainerData containerData; TarContainerPacker packer = getPacker(compression); @@ -119,6 +114,13 @@ public void importContainer(long containerID, Path tarFilePath, containerData = getKeyValueContainerData(containerDescriptorYaml); } ContainerUtils.verifyChecksum(containerData, conf); + + HddsVolume targetVolume = hddsVolume; + if (targetVolume == null) { + targetVolume = chooseNextVolume(); + } + + containerData.setCommittedSpace(true); containerData.setVolume(targetVolume); try (InputStream input = Files.newInputStream(tarFilePath)) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 0a31b9746d57..9614975d9f02 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -204,10 +204,18 @@ private KeyValueContainer addContainer(ContainerSet cSet, long cID) data.addMetadata("VOLUME", "shire"); data.addMetadata("owner)", "bilbo"); KeyValueContainer container = new KeyValueContainer(data, conf); + + // build a map of volume and committed bytes before create + Map volumeMap = new HashMap<>(); + for (HddsVolume volume : StorageVolumeUtil.getHddsVolumesList( + volumeSet.getVolumesList())) { + volumeMap.put(getVolumeKey(volume), volume.getCommittedBytes()); + } + container.create(volumeSet, volumeChoosingPolicy, SCM_ID); - commitBytesBefore = container.getContainerData() - .getVolume().getCommittedBytes(); cSet.addContainer(container); + + commitBytesBefore = volumeMap.get(getVolumeKey(container.getContainerData().getVolume())); commitBytesAfter = container.getContainerData() .getVolume().getCommittedBytes(); commitIncrement = commitBytesAfter - commitBytesBefore; @@ -1005,4 +1013,8 @@ public void testListBlock(ContainerTestVersionInfo versionInfo) assertThat(exception.getMessage()) .contains("Count must be a positive number."); } + + private String getVolumeKey(HddsVolume volume) { + return volume.getHddsRootDir().getPath(); + } } From f86e8fc826805415d222202cf6a1885175499802 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Sat, 22 Mar 2025 00:33:08 +0800 Subject: [PATCH 13/18] Add test coverage for VolumeChoosingPolicy(s) --- .../volume/TestCapacityVolumeChoosingPolicy.java | 11 +++++++++++ .../volume/TestRoundRobinVolumeChoosingPolicy.java | 13 +++++++++++++ 2 files changed, 24 insertions(+) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java index d6c97c5f1a36..07ae372a4cd5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java @@ -149,4 +149,15 @@ public void testVolumeChoosingPolicyFactory() VolumeChoosingPolicyFactory.getPolicy(CONF).getClass()); } + @Test + public void testVolumeCommittedSpace() throws Exception { + Map initialCommittedSpace = new HashMap<>(); + volumes.forEach(vol -> + initialCommittedSpace.put(vol, vol.getCommittedBytes())); + + HddsVolume selectedVolume = policy.chooseVolume(volumes, 50); + + assertEquals(initialCommittedSpace.get(selectedVolume) + 50, + selectedVolume.getCommittedBytes()); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index 1c07fe7ab7b3..2406011a3d14 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -25,7 +25,9 @@ import java.nio.file.Path; import java.time.Duration; 
import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.MockSpaceUsageSource; @@ -115,4 +117,15 @@ public void throwsDiskOutOfSpaceIfRequestMoreThanAvailable() { "Most available space: 150 bytes"); } + @Test + public void testVolumeCommittedSpace() throws Exception { + Map initialCommittedSpace = new HashMap<>(); + volumes.forEach(vol -> + initialCommittedSpace.put(vol, vol.getCommittedBytes())); + + HddsVolume selectedVolume = policy.chooseVolume(volumes, 50); + + assertEquals(initialCommittedSpace.get(selectedVolume) + 50, + selectedVolume.getCommittedBytes()); + } } From f2d22b883206b3f1214c8b393c57dbc54853efd2 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Thu, 10 Apr 2025 09:43:45 +0000 Subject: [PATCH 14/18] Revert "Add test coverage for VolumeChoosingPolicy(s)" This reverts commit f86e8fc826805415d222202cf6a1885175499802. --- .../volume/TestCapacityVolumeChoosingPolicy.java | 11 ----------- .../volume/TestRoundRobinVolumeChoosingPolicy.java | 13 ------------- 2 files changed, 24 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java index 07ae372a4cd5..d6c97c5f1a36 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java @@ -149,15 +149,4 @@ public void testVolumeChoosingPolicyFactory() VolumeChoosingPolicyFactory.getPolicy(CONF).getClass()); } - @Test - public void testVolumeCommittedSpace() throws Exception { - Map initialCommittedSpace = new HashMap<>(); - volumes.forEach(vol -> - initialCommittedSpace.put(vol, vol.getCommittedBytes())); - - HddsVolume selectedVolume = policy.chooseVolume(volumes, 50); - - assertEquals(initialCommittedSpace.get(selectedVolume) + 50, - selectedVolume.getCommittedBytes()); - } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index 2406011a3d14..1c07fe7ab7b3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -25,9 +25,7 @@ import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.MockSpaceUsageSource; @@ -117,15 +115,4 @@ public void throwsDiskOutOfSpaceIfRequestMoreThanAvailable() { "Most available space: 150 bytes"); } - @Test - public void testVolumeCommittedSpace() throws Exception { - Map initialCommittedSpace = new HashMap<>(); - volumes.forEach(vol -> - initialCommittedSpace.put(vol, 
vol.getCommittedBytes())); - - HddsVolume selectedVolume = policy.chooseVolume(volumes, 50); - - assertEquals(initialCommittedSpace.get(selectedVolume) + 50, - selectedVolume.getCommittedBytes()); - } } From d05c13fd592f621c848a42ddf5e18e36af1f4c9e Mon Sep 17 00:00:00 2001 From: peterxcli Date: Thu, 10 Apr 2025 09:43:58 +0000 Subject: [PATCH 15/18] Revert "Set committedSpace for containerData manually after choosing volume" This reverts commit 90740b7e284bdbbc8f2b3260b4e0e53b5115ae2c. --- .../container/common/impl/ContainerData.java | 30 ------------------- .../container/common/impl/ContainerSet.java | 2 -- .../container/keyvalue/KeyValueContainer.java | 1 - .../replication/ContainerImporter.java | 12 ++++---- .../common/impl/TestContainerPersistence.java | 16 ++-------- 5 files changed, 7 insertions(+), 54 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index a4af2ed2a0b2..7d2074198572 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -351,32 +351,6 @@ private void releaseCommitSpace() { committedSpace = false; } - /** - * add available space in the container to the committed space in the volume. - * available space is the number of bytes remaining till max capacity. - */ - public void commitSpace() { - long unused = getMaxSize() - getBytesUsed(); - ContainerDataProto.State myState = getState(); - HddsVolume cVol; - - if (committedSpace) { - return; - } - - // Only Open Containers have Committed Space - if (myState != ContainerDataProto.State.OPEN) { - return; - } - - // junit tests do not always set up volume - cVol = getVolume(); - if (unused > 0 && (cVol != null)) { - cVol.incCommittedBytes(unused); - committedSpace = true; - } - } - /** * Get the number of bytes read from the container. * @return the number of bytes read from the container. @@ -462,10 +436,6 @@ public void setBytesUsed(long used) { this.bytesUsed.set(used); } - public void setCommittedSpace(boolean committed) { - this.committedSpace = committed; - } - /** * Get the number of bytes used by the container. * @return the number of bytes used by the container. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 22454e60a98c..9e3bef2f6ba0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -152,8 +152,6 @@ private boolean addContainer(Container container, boolean overwrite) throws throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } missingContainerSet.remove(containerId); - // wish we could have done this from ContainerData.setState - container.getContainerData().commitSpace(); if (container.getContainerData().getState() == RECOVERING) { recoveringContainerMap.put( clock.millis() + recoveringTimeout, containerId); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 8c7099fef7b0..dc944bcb1ca0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -169,7 +169,6 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); // Set volume before getContainerDBFile(), because we may need the // volume to deduce the db file. - containerData.setCommittedSpace(true); containerData.setVolume(containerVolume); long containerID = containerData.getContainerID(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 86e1fbd9aec2..f69516f94e17 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -106,6 +106,11 @@ public void importContainer(long containerID, Path tarFilePath, ContainerProtos.Result.CONTAINER_EXISTS); } + HddsVolume targetVolume = hddsVolume; + if (targetVolume == null) { + targetVolume = chooseNextVolume(); + } + KeyValueContainerData containerData; TarContainerPacker packer = getPacker(compression); @@ -115,13 +120,6 @@ public void importContainer(long containerID, Path tarFilePath, containerData = getKeyValueContainerData(containerDescriptorYaml); } ContainerUtils.verifyChecksum(containerData, conf); - - HddsVolume targetVolume = hddsVolume; - if (targetVolume == null) { - targetVolume = chooseNextVolume(); - } - - containerData.setCommittedSpace(true); containerData.setVolume(targetVolume); try (InputStream input = Files.newInputStream(tarFilePath)) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 9614975d9f02..0a31b9746d57 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -204,18 +204,10 @@ private KeyValueContainer addContainer(ContainerSet cSet, long cID) data.addMetadata("VOLUME", "shire"); data.addMetadata("owner)", "bilbo"); KeyValueContainer container = new KeyValueContainer(data, conf); - - // build a map of volume and committed bytes before create - Map volumeMap = new HashMap<>(); - for (HddsVolume volume : StorageVolumeUtil.getHddsVolumesList( - volumeSet.getVolumesList())) { - volumeMap.put(getVolumeKey(volume), volume.getCommittedBytes()); - } - container.create(volumeSet, volumeChoosingPolicy, SCM_ID); + commitBytesBefore = container.getContainerData() + .getVolume().getCommittedBytes(); cSet.addContainer(container); - - commitBytesBefore = volumeMap.get(getVolumeKey(container.getContainerData().getVolume())); commitBytesAfter = container.getContainerData() .getVolume().getCommittedBytes(); commitIncrement = commitBytesAfter - commitBytesBefore; @@ -1013,8 +1005,4 @@ public void testListBlock(ContainerTestVersionInfo versionInfo) assertThat(exception.getMessage()) .contains("Count must be a positive number."); } - - private String getVolumeKey(HddsVolume volume) { - return volume.getHddsRootDir().getPath(); - } } From c04ba06f672ffe2b970825f3e82eaac7d4183714 Mon Sep 17 00:00:00 2001 From: peterxcli Date: Thu, 10 Apr 2025 09:44:07 +0000 Subject: [PATCH 16/18] Revert "Move commit volume space to VolumeChoosingPolicy to make it atomic" This reverts commit 88028caedabfece6c763ec218c8be76638a20570. --- .../container/common/impl/ContainerData.java | 35 +++++++++++++++++++ .../container/common/impl/ContainerSet.java | 2 ++ .../volume/CapacityVolumeChoosingPolicy.java | 5 +-- .../RoundRobinVolumeChoosingPolicy.java | 1 - 4 files changed, 38 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index 7d2074198572..12c618b226fa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -214,6 +214,16 @@ public synchronized void setState(ContainerDataProto.State state) { (state != oldState)) { releaseCommitSpace(); } + + /** + * commit space when container transitions (back) to Open. + * when? perhaps closing a container threw an exception + */ + if ((state == ContainerDataProto.State.OPEN) && + (state != oldState)) { + Preconditions.checkState(getMaxSize() > 0); + commitSpace(); + } } /** @@ -351,6 +361,31 @@ private void releaseCommitSpace() { committedSpace = false; } + /** + * add available space in the container to the committed space in the volume. + * available space is the number of bytes remaining till max capacity. 
+ */ + public void commitSpace() { + long unused = getMaxSize() - getBytesUsed(); + ContainerDataProto.State myState = getState(); + HddsVolume cVol; + + //we don't expect duplicate calls + Preconditions.checkState(!committedSpace); + + // Only Open Containers have Committed Space + if (myState != ContainerDataProto.State.OPEN) { + return; + } + + // junit tests do not always set up volume + cVol = getVolume(); + if (unused > 0 && (cVol != null)) { + cVol.incCommittedBytes(unused); + committedSpace = true; + } + } + /** * Get the number of bytes read from the container. * @return the number of bytes read from the container. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 9e3bef2f6ba0..22454e60a98c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -152,6 +152,8 @@ private boolean addContainer(Container container, boolean overwrite) throws throw new StorageContainerException(e, ContainerProtos.Result.IO_EXCEPTION); } missingContainerSet.remove(containerId); + // wish we could have done this from ContainerData.setState + container.getContainerData().commitSpace(); if (container.getContainerData().getState() == RECOVERING) { recoveringContainerMap.put( clock.millis() + recoveringTimeout, containerId); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index a8742ef3b3ec..853ed1f19814 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -92,10 +92,7 @@ public synchronized HddsVolume chooseVolume(List volumes, - firstVolume.getCommittedBytes(); long secondAvailable = secondVolume.getCurrentUsage().getAvailable() - secondVolume.getCommittedBytes(); - HddsVolume selectedVolume = firstAvailable < secondAvailable ? secondVolume : firstVolume; - - selectedVolume.incCommittedBytes(maxContainerSize); - return selectedVolume; + return firstAvailable < secondAvailable ? 
secondVolume : firstVolume; } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java index 8edf2bfc124a..1a6e5c2b12ed 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java @@ -68,7 +68,6 @@ public synchronized HddsVolume chooseVolume(List volumes, if (hasEnoughSpace) { logIfSomeVolumesOutOfSpace(filter, LOG); nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex); - volume.incCommittedBytes(maxContainerSize); return volume; } From afdd3ff4f08ff835d37472f7c69b596a3f4e5b4d Mon Sep 17 00:00:00 2001 From: peterxcli Date: Fri, 11 Apr 2025 11:16:16 +0000 Subject: [PATCH 17/18] Remove synchronized from VolumeChoosingPolicy#chooseVolume at this time --- .../container/common/volume/CapacityVolumeChoosingPolicy.java | 2 +- .../container/common/volume/RoundRobinVolumeChoosingPolicy.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 853ed1f19814..0f73f9d009e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -47,7 +47,7 @@ public class CapacityVolumeChoosingPolicy implements VolumeChoosingPolicy { // Stores the index of the next volume to be returned. 
   private final Random random = new Random();
   @Override
-  public synchronized HddsVolume chooseVolume(List<HddsVolume> volumes,
+  public HddsVolume chooseVolume(List<HddsVolume> volumes,
       long maxContainerSize) throws IOException {
 
     // No volumes available to choose from
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
index 1a6e5c2b12ed..1fe1e32f2b66 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
@@ -41,7 +41,7 @@ public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy {
   private AtomicInteger nextVolumeIndex = new AtomicInteger(0);
 
   @Override
-  public synchronized HddsVolume chooseVolume(List<HddsVolume> volumes,
+  public HddsVolume chooseVolume(List<HddsVolume> volumes,
       long maxContainerSize) throws IOException {
 
     // No volumes available to choose from

From c6659a037bdeac9e91e751cd38eb3345f4974ba7 Mon Sep 17 00:00:00 2001
From: peterxcli
Date: Fri, 11 Apr 2025 11:19:21 +0000
Subject: [PATCH 18/18] keep patch cleaner

---
 .../container/common/volume/CapacityVolumeChoosingPolicy.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java
index 0f73f9d009e0..10794c8fa72a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java
@@ -46,6 +46,7 @@ public class CapacityVolumeChoosingPolicy implements VolumeChoosingPolicy {
   // Stores the index of the next volume to be returned.
   private final Random random = new Random();
+
   @Override
   public HddsVolume chooseVolume(List<HddsVolume> volumes,
       long maxContainerSize) throws IOException {
     // No volumes available to choose from
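// A closing sketch of the wiring pattern the constructor fallback earlier in the series points
// at: a caller that already holds a policy injects it, otherwise a default is built, so the
// container-creation path and the container-import path can share one instance and therefore
// one view of placement state. ChoosingPolicy, CreateServiceSketch and ImportServiceSketch are
// hypothetical names for illustration, not Ozone classes.
import java.util.concurrent.atomic.AtomicInteger;

interface ChoosingPolicy {
  int chooseIndex(int volumeCount);
}

final class RoundRobinSketchPolicy implements ChoosingPolicy {
  private final AtomicInteger next = new AtomicInteger();

  @Override
  public int chooseIndex(int volumeCount) {
    // Lock-free round-robin; an AtomicInteger keeps this safe to share without synchronized.
    return Math.floorMod(next.getAndIncrement(), volumeCount);
  }
}

final class CreateServiceSketch {
  private final ChoosingPolicy policy;

  CreateServiceSketch(ChoosingPolicy injected) {
    this.policy = injected != null ? injected : new RoundRobinSketchPolicy();
  }

  int placeNewContainer(int volumeCount) {
    return policy.chooseIndex(volumeCount);
  }
}

final class ImportServiceSketch {
  private final ChoosingPolicy policy;

  ImportServiceSketch(ChoosingPolicy injected) {
    this.policy = injected != null ? injected : new RoundRobinSketchPolicy();
  }

  int placeImportedContainer(int volumeCount) {
    return policy.chooseIndex(volumeCount);
  }
}

// Usage: build one policy and hand the same reference to both services, e.g.
//   ChoosingPolicy shared = new RoundRobinSketchPolicy();
//   new CreateServiceSketch(shared); new ImportServiceSketch(shared);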