From dce600fb03f1ab759696448e8e70b56d511d9fee Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 8 Apr 2024 09:26:22 -0700 Subject: [PATCH 1/7] HDDS-8188. Support unlimited length in ozone admin container list and make it default --- ...ocationProtocolClientSideTranslatorPB.java | 6 +- .../hdds/scm/container/ContainerManager.java | 4 +- .../scm/container/ContainerManagerImpl.java | 3 +- .../scm/server/SCMClientProtocolServer.java | 49 +++++++++------- .../container/TestContainerManagerImpl.java | 5 ++ .../server/TestSCMClientProtocolServer.java | 58 +++++++++++++++++++ .../scm/cli/container/ListSubcommand.java | 5 +- 7 files changed, 103 insertions(+), 27 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index b573ee0d040c..a6349bf06be2 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -399,8 +399,10 @@ public List listContainer(long startContainerID, int count, throws IOException { Preconditions.checkState(startContainerID >= 0, "Container ID cannot be negative."); - Preconditions.checkState(count > 0, - "Container count must be greater than 0."); + if (count <= 0 && count != -1) { + throw new IllegalStateException( + "Container count must be greater than 0, or equal to -1"); + } SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto .newBuilder(); builder.setStartContainerID(startContainerID); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 2a60e268ff4a..0f5f3bac4708 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -63,7 +63,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= -1 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -87,7 +87,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= 0 + * @param count count must be >= -1 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. 
* @param state container state diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 8e1e881c44ea..9e2ab2503d00 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -451,7 +451,8 @@ public SCMHAManager getSCMHAManager() { private static List filterSortAndLimit( ContainerID startID, int count, Set set) { - if (ContainerID.MIN.equals(startID) && count >= set.size()) { + if (ContainerID.MIN.equals(startID) && + (count >= set.size() || count == -1)) { List list = new ArrayList<>(set); Collections.sort(list); return list; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index ecfb92104da2..041c46ba1386 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -462,27 +462,36 @@ public List listContainer(long startContainerID, final ContainerID containerId = ContainerID.valueOf(startContainerID); if (state != null) { if (factor != null) { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> (info.getReplicationFactor() == factor)) - .sorted().limit(count).collect(Collectors.toList()); + Stream containerInfoStream = + scm.getContainerManager().getContainers(state).stream() + .filter(info -> info.containerID().getId() >= startContainerID) + //Filtering EC replication type as EC will not have factor. + .filter(info -> info + .getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> (info.getReplicationFactor() == factor)) + .sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) : + containerInfoStream.limit(count).collect(Collectors.toList()); } else { - return scm.getContainerManager().getContainers(state).stream() - .filter(info -> info.containerID().getId() >= startContainerID) - .sorted().limit(count).collect(Collectors.toList()); + Stream containerInfoStream = + scm.getContainerManager().getContainers(state).stream() + .filter(info -> info.containerID().getId() >= startContainerID) + .sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) : + containerInfoStream.limit(count).collect(Collectors.toList()); } } else { if (factor != null) { - return scm.getContainerManager().getContainers().stream() - .filter(info -> info.containerID().getId() >= startContainerID) - //Filtering EC replication type as EC will not have factor. - .filter(info -> info - .getReplicationType() != HddsProtos.ReplicationType.EC) - .filter(info -> info.getReplicationFactor() == factor) - .sorted().limit(count).collect(Collectors.toList()); + Stream containerInfoStream = + scm.getContainerManager().getContainers().stream() + .filter(info -> info.containerID().getId() >= startContainerID) + //Filtering EC replication type as EC will not have factor. 
+ .filter(info -> info + .getReplicationType() != HddsProtos.ReplicationType.EC) + .filter(info -> info.getReplicationFactor() == factor) + .sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) + : containerInfoStream.limit(count).collect(Collectors.toList()); } else { return scm.getContainerManager().getContainers(containerId, count); } @@ -554,9 +563,9 @@ public List listContainer(long startContainerID, containerStream = containerStream .filter(info -> info.getReplicationType() == replicationType); } - return containerStream.sorted() - .limit(count) - .collect(Collectors.toList()); + Stream containerInfoStream = containerStream.sorted(); + return count == -1 ? containerInfoStream.collect(Collectors.toList()) : + containerInfoStream.limit(count).collect(Collectors.toList()); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 25a4a80f233e..001c00f89e09 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -147,6 +147,8 @@ void testGetContainers() throws Exception { containerManager.getContainers(ContainerID.MIN, 10)); assertIds(ids.subList(0, 5), containerManager.getContainers(ContainerID.MIN, 5)); + assertIds(ids, + containerManager.getContainers(ContainerID.MIN, -1)); assertIds(ids, containerManager.getContainers(ids.get(0), 10)); assertIds(ids, containerManager.getContainers(ids.get(0), 100)); @@ -154,6 +156,9 @@ void testGetContainers() throws Exception { containerManager.getContainers(ids.get(5), 100)); assertIds(emptyList(), containerManager.getContainers(ids.get(5), 100, LifeCycleState.CLOSED)); + assertIds(ids, + containerManager.getContainers(ContainerID.MIN, -1, + LifeCycleState.OPEN)); containerManager.updateContainerState(ids.get(0), HddsProtos.LifeCycleEvent.FINALIZE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 7c06b79a2ffb..8c28b9601b07 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.hdds.scm.server; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ReconfigurationHandler; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import 
org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -35,9 +42,13 @@ import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -112,4 +123,51 @@ public void testReadOnlyAdmins() throws IOException { UserGroupInformation.reset(); } } + + /** + * Tests listContainer of scm. + */ + @Test + public void testScmListContainer() throws Exception { + SCMClientProtocolServer scmServer = + new SCMClientProtocolServer(new OzoneConfiguration(), + mockStorageContainerManager(), mock(ReconfigurationHandler.class)); + + assertEquals(10, scmServer.listContainer(1, 10, + null, HddsProtos.ReplicationType.RATIS, null).size()); + assertEquals(20, scmServer.listContainer(1, -1, + null, HddsProtos.ReplicationType.RATIS, null).size()); + // Test call from a legacy client, which uses a different method of listContainer + assertEquals(10, scmServer.listContainer(1, 10, null, + HddsProtos.ReplicationFactor.THREE).size()); + assertEquals(20, scmServer.listContainer(1, -1, null, + HddsProtos.ReplicationFactor.THREE).size()); + } + + private StorageContainerManager mockStorageContainerManager() { + List infos = new ArrayList<>(); + for (int i = 0; i < 20; i++) { + infos.add(newContainerInfoForTest()); + } + ContainerManagerImpl containerManager = mock(ContainerManagerImpl.class); + when(containerManager.getContainers()).thenReturn(infos); + StorageContainerManager storageContainerManager = mock(StorageContainerManager.class); + when(storageContainerManager.getContainerManager()).thenReturn(containerManager); + + SCMNodeDetails scmNodeDetails = mock(SCMNodeDetails.class); + when(scmNodeDetails.getClientProtocolServerAddress()).thenReturn(new InetSocketAddress("localhost", 9876)); + when(scmNodeDetails.getClientProtocolServerAddressKey()).thenReturn("test"); + when(storageContainerManager.getScmNodeDetails()).thenReturn(scmNodeDetails); + return storageContainerManager; + } + + private ContainerInfo newContainerInfoForTest() { + return new ContainerInfo.Builder() + .setContainerID(1234) + .setPipelineID(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .build(); + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index ecc43d04087a..071f7ad63621 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -55,8 +55,9 @@ public class ListSubcommand extends ScmSubcommand { private long startId; @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", - defaultValue = "20", showDefaultValue = Visibility.ALWAYS) + description = "Maximum number of containers to list." 
+ + "Make count=-1 default to list all containers", + defaultValue = "-1", showDefaultValue = Visibility.ALWAYS) private int count; @Option(names = {"--state"}, From f1e7c6eaaea54c5ead5c405fa52b420d28d10f91 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 8 Apr 2024 11:34:41 -0700 Subject: [PATCH 2/7] update error message of listContainer of StorageContainerLocationProtocolClientSideTranslatorPB class --- ...StorageContainerLocationProtocolClientSideTranslatorPB.java | 3 ++- .../hadoop/hdds/scm/server/TestSCMClientProtocolServer.java | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index a6349bf06be2..32681f830148 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -401,7 +401,8 @@ public List listContainer(long startContainerID, int count, "Container ID cannot be negative."); if (count <= 0 && count != -1) { throw new IllegalStateException( - "Container count must be greater than 0, or equal to -1"); + "Container count must be either greater than 0 " + + "or equal to -1 (list all containers)"); } SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto .newBuilder(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 8c28b9601b07..20a4ff683956 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -163,7 +163,7 @@ private StorageContainerManager mockStorageContainerManager() { private ContainerInfo newContainerInfoForTest() { return new ContainerInfo.Builder() - .setContainerID(1234) + .setContainerID(1) .setPipelineID(PipelineID.randomId()) .setReplicationConfig( RatisReplicationConfig From 28c3347d8e570a6cbf0aa8842d0d256c1bc01de7 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 15 Apr 2024 11:47:15 -0700 Subject: [PATCH 3/7] Change to make list all containers non-default, printing a message to stdout indicating the result is truncated --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 6 +++ .../hadoop/hdds/scm/client/ScmClient.java | 4 +- .../StorageContainerLocationProtocol.java | 8 ++-- .../src/main/resources/ozone-default.xml | 7 ++++ ...ocationProtocolClientSideTranslatorPB.java | 12 +++--- .../src/main/proto/ScmAdminProtocol.proto | 1 + .../scm/container/ContainerManagerImpl.java | 6 +++ ...ocationProtocolServerSideTranslatorPB.java | 9 ++-- .../scm/server/SCMClientProtocolServer.java | 41 ++++++++++++------- .../container/TestContainerManagerImpl.java | 5 --- .../server/TestSCMClientProtocolServer.java | 4 -- .../scm/cli/ContainerOperationClient.java | 20 ++++++++- .../scm/cli/container/ContainerCommands.java | 8 ++++ .../scm/cli/container/ListSubcommand.java | 37 ++++++++++++++++- .../hadoop/ozone/TestContainerOperations.java | 19 +++++++++ .../hadoop/ozone/shell/TestOzoneShellHA.java | 27 
+++++++++++- .../freon/ClosedContainerReplicator.java | 2 +- 17 files changed, 171 insertions(+), 45 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index d8fdbc1063a9..c8c8f25f70be 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -131,6 +131,12 @@ public final class ScmConfigKeys { "hdds.ratis.snapshot.threshold"; public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String HDDS_CONTAINER_LIST_MAX_COUNT = + "hdds.container.list.max.count"; + + public static final int HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; + + // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 6a46741a06ec..a70665a6f0a0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -124,7 +124,7 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @return a list of pipeline. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count) throws IOException; /** @@ -137,7 +137,7 @@ List listContainer(long startContainerID, * @return a list of pipeline. * @throws IOException */ - List listContainer(long startContainerID, int count, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 90838366317f..f99e2561dcba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -148,7 +148,7 @@ List getExistContainerWithPipelinesInBatch( * @return a list of container. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count) throws IOException; /** @@ -167,7 +167,7 @@ List listContainer(long startContainerID, * @return a list of container. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException; /** @@ -186,7 +186,7 @@ List listContainer(long startContainerID, * @return a list of container. * @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException; @@ -207,7 +207,7 @@ List listContainer(long startContainerID, * @return a list of container. 
* @throws IOException */ - List listContainer(long startContainerID, + Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) throws IOException; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 5a4b9a22c887..747dfe22c31b 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -161,6 +161,13 @@ this not set. Ideally, this should be mapped to a fast disk like an SSD. + + hdds.container.list.max.count + 4096 + OZONE, CONTAINER, MANAGEMENT + The max number of containers info could be included in + response of ListContainer request. + hdds.datanode.dir diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 32681f830148..eb42c637b385 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -380,19 +380,19 @@ public List getExistContainerWithPipelinesInBatch( * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, int count) + public Pair, Long> listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @Override - public List listContainer(long startContainerID, int count, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @Override - public List listContainer(long startContainerID, int count, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig replicationConfig) @@ -437,12 +437,12 @@ public List listContainer(long startContainerID, int count, .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } - return containerList; + return Pair.of(containerList, response.getContainerCount()); } @Deprecated @Override - public List listContainer(long startContainerID, int count, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { throw new UnsupportedOperationException("Should no longer be called from " + @@ -1174,7 +1174,7 @@ public void close() { public List getListOfContainers( long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { - return listContainer(startContainerID, count, state); + return listContainer(startContainerID, count, state).getLeft(); } @Override diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index eff95099371c..646edc887edd 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -293,6 +293,7 @@ message SCMListContainerRequestProto { 
message SCMListContainerResponseProto { repeated ContainerInfoProto containers = 1; + optional int64 containerCount = 2; } message SCMDeleteContainerRequestProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 9e2ab2503d00..fde57286099b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -86,6 +86,8 @@ public class ContainerManagerImpl implements ContainerManager { @SuppressWarnings("java:S2245") // no need for secure random private final Random random = new Random(); + private int maxCountOfContainerList; + /** * */ @@ -115,6 +117,10 @@ public ContainerManagerImpl( .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); + this.maxCountOfContainerList = conf + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + this.scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 16a8cbd5a4f5..d37c4a7c8e2f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -849,21 +849,22 @@ public SCMListContainerResponseProto listContainer( } else if (request.hasFactor()) { factor = request.getFactor(); } - List containerList; + Pair, Long> containerListAndTotalCount; if (factor != null) { // Call from a legacy client - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, factor); } else { - containerList = + containerListAndTotalCount = impl.listContainer(startContainerID, count, state, replicationType, repConfig); } SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getLeft()) { builder.addContainers(container.getProtobuf()); } + builder.setContainerCount(containerListAndTotalCount.getRight()); return builder.build(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 041c46ba1386..a83cd2de68b6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -412,7 +412,7 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @throws IOException */ @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count) throws IOException { return listContainer(startContainerID, count, null, null, null); } @@ -428,7 +428,7 @@ public 
List listContainer(long startContainerID, * @throws IOException */ @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state) throws IOException { return listContainer(startContainerID, count, state, null, null); } @@ -445,7 +445,7 @@ public List listContainer(long startContainerID, */ @Override @Deprecated - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationFactor factor) throws IOException { boolean auditSuccess = true; @@ -470,15 +470,20 @@ public List listContainer(long startContainerID, .getReplicationType() != HddsProtos.ReplicationType.EC) .filter(info -> (info.getReplicationFactor() == factor)) .sorted(); - return count == -1 ? containerInfoStream.collect(Collectors.toList()) : - containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } else { Stream containerInfoStream = scm.getContainerManager().getContainers(state).stream() .filter(info -> info.containerID().getId() >= startContainerID) .sorted(); - return count == -1 ? containerInfoStream.collect(Collectors.toList()) : - containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of( + containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } } else { if (factor != null) { @@ -490,10 +495,14 @@ public List listContainer(long startContainerID, .getReplicationType() != HddsProtos.ReplicationType.EC) .filter(info -> info.getReplicationFactor() == factor) .sorted(); - return count == -1 ? 
containerInfoStream.collect(Collectors.toList()) - : containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } else { - return scm.getContainerManager().getContainers(containerId, count); + List containerInfos = + scm.getContainerManager().getContainers(containerId, count); + return Pair.of(containerInfos, (long)(containerInfos.size())); } } } catch (Exception ex) { @@ -520,7 +529,7 @@ public List listContainer(long startContainerID, * @throws IOException */ @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType replicationType, ReplicationConfig repConfig) throws IOException { @@ -541,7 +550,9 @@ public List listContainer(long startContainerID, final ContainerID containerId = ContainerID.valueOf(startContainerID); if (state == null && replicationType == null && repConfig == null) { // Not filters, so just return everything - return scm.getContainerManager().getContainers(containerId, count); + List containerInfos = + scm.getContainerManager().getContainers(containerId, count); + return Pair.of(containerInfos, (long)containerInfos.size()); } List containerList; @@ -564,8 +575,10 @@ public List listContainer(long startContainerID, .filter(info -> info.getReplicationType() == replicationType); } Stream containerInfoStream = containerStream.sorted(); - return count == -1 ? containerInfoStream.collect(Collectors.toList()) : - containerInfoStream.limit(count).collect(Collectors.toList()); + List containerInfos = containerInfoStream.collect( + Collectors.toList()); + return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), + (long)containerInfos.size()); } catch (Exception ex) { auditSuccess = false; AUDIT.logReadFailure( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 001c00f89e09..25a4a80f233e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -147,8 +147,6 @@ void testGetContainers() throws Exception { containerManager.getContainers(ContainerID.MIN, 10)); assertIds(ids.subList(0, 5), containerManager.getContainers(ContainerID.MIN, 5)); - assertIds(ids, - containerManager.getContainers(ContainerID.MIN, -1)); assertIds(ids, containerManager.getContainers(ids.get(0), 10)); assertIds(ids, containerManager.getContainers(ids.get(0), 100)); @@ -156,9 +154,6 @@ void testGetContainers() throws Exception { containerManager.getContainers(ids.get(5), 100)); assertIds(emptyList(), containerManager.getContainers(ids.get(5), 100, LifeCycleState.CLOSED)); - assertIds(ids, - containerManager.getContainers(ContainerID.MIN, -1, - LifeCycleState.OPEN)); containerManager.updateContainerState(ids.get(0), HddsProtos.LifeCycleEvent.FINALIZE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index 20a4ff683956..f0958aeaa586 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -135,13 +135,9 @@ public void testScmListContainer() throws Exception { assertEquals(10, scmServer.listContainer(1, 10, null, HddsProtos.ReplicationType.RATIS, null).size()); - assertEquals(20, scmServer.listContainer(1, -1, - null, HddsProtos.ReplicationType.RATIS, null).size()); // Test call from a legacy client, which uses a different method of listContainer assertEquals(10, scmServer.listContainer(1, 10, null, HddsProtos.ReplicationFactor.THREE).size()); - assertEquals(20, scmServer.listContainer(1, -1, null, - HddsProtos.ReplicationFactor.THREE).size()); } private StorageContainerManager mockStorageContainerManager() { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index d51479d44bbb..2d495e9c0db9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -82,6 +82,7 @@ public class ContainerOperationClient implements ScmClient { private final boolean containerTokenEnabled; private final OzoneConfiguration configuration; private XceiverClientManager xceiverClientManager; + private int maxCountOfContainerList; public synchronized XceiverClientManager getXceiverClientManager() throws IOException { @@ -109,6 +110,9 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { } containerTokenEnabled = conf.getBoolean(HDDS_CONTAINER_TOKEN_ENABLED, HDDS_CONTAINER_TOKEN_ENABLED_DEFAULT); + maxCountOfContainerList = conf + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); } private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) @@ -338,17 +342,29 @@ public void deleteContainer(long containerID, boolean force) } @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count) throws IOException { + if (count > maxCountOfContainerList) { + LOG.error("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count); } @Override - public List listContainer(long startContainerID, + public Pair, Long> listContainer(long startContainerID, int count, HddsProtos.LifeCycleState state, HddsProtos.ReplicationType repType, ReplicationConfig replicationConfig) throws IOException { + if (count > maxCountOfContainerList) { + LOG.error("Attempting to list {} containers. However, this exceeds" + + " the cluster's current limit of {}. 
The results will be capped at the" + + " maximum allowed count.", count, maxCountOfContainerList); + count = maxCountOfContainerList; + } return storageContainerLocationClient.listContainer( startContainerID, count, state, repType, replicationConfig); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 54c69273f0bc..15dd873491cc 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -26,6 +26,7 @@ import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -51,6 +52,9 @@ public class ContainerCommands implements Callable, SubcommandWithParent { @Spec private CommandSpec spec; + @ParentCommand + private OzoneAdmin parent; + @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); @@ -61,4 +65,8 @@ public Void call() throws Exception { public Class getParentType() { return OzoneAdmin.class; } + + public OzoneAdmin getParent() { + return parent; + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 071f7ad63621..ec5eb71ee29c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -21,11 +21,13 @@ import java.util.List; import com.google.common.base.Strings; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -37,6 +39,7 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; +import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -60,6 +63,14 @@ public class ListSubcommand extends ScmSubcommand { defaultValue = "-1", showDefaultValue = Visibility.ALWAYS) private int count; + @Option(names = {"-a", "--all"}, + description = "List all results. However the total number of containers might exceed " + + " the cluster's limit \"hdds.container.list.max.count\"." 
+ + " The results will be capped at the " + + " maximum allowed count.", + defaultValue = "false") + private boolean all; + @Option(names = {"--state"}, description = "Container state(OPEN, CLOSING, QUASI_CLOSED, CLOSED, " + "DELETING, DELETED)") @@ -76,6 +87,9 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; + @ParentCommand + private ContainerCommands parent; + static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -106,12 +120,31 @@ public void execute(ScmClient scmClient) throws IOException { ReplicationType.fromProto(type), replication, new OzoneConfiguration()); } - List containerList = + + if (all) { + System.out.printf("Attempting to list all containers." + + " The total number of container might exceed" + + " the cluster's current limit of %s. The results will be capped at the" + + " maximum allowed count.%n", ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + count = parent.getParent().getOzoneConf() + .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, + ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT); + } + + Pair, Long> containerListAndTotalCount = scmClient.listContainer(startId, count, state, type, repConfig); // Output data list - for (ContainerInfo container : containerList) { + for (ContainerInfo container : containerListAndTotalCount.getLeft()) { outputContainerInfo(container); } + + if (containerListAndTotalCount.getRight() > count) { + System.out.printf("Container List is truncated since it's too long. " + + "List the first %d records of %d. " + + "User won't be able to view the full list of containers until " + + "pagination feature is supported. %n", + count, containerListAndTotalCount.getRight()); + } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 5f8f34a2e3ce..43a5ea0158d3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -62,6 +62,7 @@ public static void setup() throws Exception { ozoneConf = new OzoneConfiguration(); ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); + ozoneConf.set(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, "1"); cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build(); storageClient = new ContainerOperationClient(ozoneConf); cluster.waitForClusterToBeReady(); @@ -88,6 +89,24 @@ public void testCreate() throws Exception { .getContainerID()); } + /** + * Test to try to list number of containers over the max number Ozone allows. + * @throws Exception + */ + @Test + public void testListContainerExceedMaxAllowedCountOperations() throws Exception { + // create 2 containers in cluster where the limit of max count for + // listing container is set to 1 + for (int i = 0; i < 2; i++) { + storageClient.createContainer(HddsProtos + .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor + .ONE, OzoneConsts.OZONE); + } + + assertEquals(1, storageClient.listContainer(0, 2) + .getLeft().size()); + } + /** * A simple test to get Pipeline with {@link ContainerOperationClient}. 
* @throws Exception diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index f86fd46946c4..870ff4d9be82 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -33,6 +33,7 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileChecksum; @@ -145,6 +146,8 @@ public class TestOzoneShellHA { private static String omServiceId; private static int numOfOMs; + private static OzoneConfiguration ozoneConfiguration; + /** * Create a MiniOzoneCluster for testing with using distributed Ozone * handler type. @@ -186,6 +189,8 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + conf.setInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT, 1); + ozoneConfiguration = conf; MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -221,7 +226,7 @@ public static void shutdown() { @BeforeEach public void setup() throws UnsupportedEncodingException { ozoneShell = new OzoneShell(); - ozoneAdminShell = new OzoneAdmin(); + ozoneAdminShell = new OzoneAdmin(ozoneConfiguration); System.setOut(new PrintStream(out, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(err, false, DEFAULT_ENCODING)); } @@ -572,6 +577,26 @@ public void testOzoneAdminCmdList() throws UnsupportedEncodingException { execute(ozoneAdminShell, args); } + @Test + public void testOzoneAdminCmdListAllContainer() + throws UnsupportedEncodingException { + String[] args1 = new String[] {"container", "create", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + for (int i = 0; i < 2; i++) { + execute(ozoneAdminShell, args1); + } + + String[] args = new String[] {"container", "list", "-a", "--scm", + "localhost:" + cluster.getStorageContainerManager().getClientRpcPort()}; + execute(ozoneAdminShell, args); + assertEquals(1, getNumOfContainers()); + } + + private int getNumOfContainers() + throws UnsupportedEncodingException { + return out.toString(DEFAULT_ENCODING).split("\"containerID\" :").length - 1; + } + /** * Helper function to retrieve Ozone client configuration for trash testing. * @param hostPrefix Scheme + Authority. e.g. 
ofs://om-service-test1 diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index d471c13462f7..ccfac3711a82 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -101,7 +101,7 @@ public Void call() throws Exception { new ContainerOperationClient(conf); final List containerInfos = - containerOperationClient.listContainer(0L, 1_000_000); + containerOperationClient.listContainer(0L, 1_000_000).getLeft(); //logic same as the download+import on the destination datanode initializeReplicationSupervisor(conf, containerInfos.size() * 2); From 675888a4aed7c606bad9ec3003e26f393c76bd99 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 15 Apr 2024 16:31:42 -0700 Subject: [PATCH 4/7] fix compilation error in TestSCMClientProtocolServer --- .../java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 1 - ...ageContainerLocationProtocolClientSideTranslatorPB.java | 7 ++----- .../apache/hadoop/hdds/scm/container/ContainerManager.java | 4 ++-- .../hadoop/hdds/scm/container/ContainerManagerImpl.java | 3 +-- .../hdds/scm/server/TestSCMClientProtocolServer.java | 4 ++-- 5 files changed, 7 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c8c8f25f70be..b90c80e20d13 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -136,7 +136,6 @@ public final class ScmConfigKeys { public static final int HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT = 4096; - // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; // 4 MB by default diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index eb42c637b385..b0ae35713b4d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -399,11 +399,8 @@ public Pair, Long> listContainer(long startContainerID, int throws IOException { Preconditions.checkState(startContainerID >= 0, "Container ID cannot be negative."); - if (count <= 0 && count != -1) { - throw new IllegalStateException( - "Container count must be either greater than 0 " + - "or equal to -1 (list all containers)"); - } + Preconditions.checkState(count > 0, + "Container count must be greater than 0."); SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto .newBuilder(); builder.setStartContainerID(startContainerID); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 0f5f3bac4708..2a60e268ff4a 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -63,7 +63,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= -1 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @@ -87,7 +87,7 @@ default List getContainers() { * * @param startID start containerID, >=0, * start searching at the head if 0. - * @param count count must be >= -1 + * @param count count must be >= 0 * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big. * @param state container state diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index fde57286099b..a7818ff4760d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -457,8 +457,7 @@ public SCMHAManager getSCMHAManager() { private static List filterSortAndLimit( ContainerID startID, int count, Set set) { - if (ContainerID.MIN.equals(startID) && - (count >= set.size() || count == -1)) { + if (ContainerID.MIN.equals(startID) && count >= set.size()) { List list = new ArrayList<>(set); Collections.sort(list); return list; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java index f0958aeaa586..3629b9ca1b3b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java @@ -134,10 +134,10 @@ public void testScmListContainer() throws Exception { mockStorageContainerManager(), mock(ReconfigurationHandler.class)); assertEquals(10, scmServer.listContainer(1, 10, - null, HddsProtos.ReplicationType.RATIS, null).size()); + null, HddsProtos.ReplicationType.RATIS, null).getLeft().size()); // Test call from a legacy client, which uses a different method of listContainer assertEquals(10, scmServer.listContainer(1, 10, null, - HddsProtos.ReplicationFactor.THREE).size()); + HddsProtos.ReplicationFactor.THREE).getLeft().size()); } private StorageContainerManager mockStorageContainerManager() { From b5eb86b9eb87a2f9840a00bdb3492b676f246f20 Mon Sep 17 00:00:00 2001 From: DaveTeng0 Date: Mon, 15 Apr 2024 20:55:23 -0700 Subject: [PATCH 5/7] update method doc --- .../org/apache/hadoop/hdds/scm/client/ScmClient.java | 6 ++++-- .../protocol/StorageContainerLocationProtocol.java | 12 ++++++++---- .../hdds/scm/server/SCMClientProtocolServer.java | 12 ++++++++---- .../hdds/scm/server/TestSCMClientProtocolServer.java | 2 +- .../hdds/scm/cli/ContainerOperationClient.java | 2 +- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index a70665a6f0a0..a4debf1facc1 100644 --- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -121,7 +121,8 @@ void deleteContainer(long containerId, Pipeline pipeline, boolean force) * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -134,7 +135,8 @@ Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container of this state will be returned. * @param replicationConfig container replication Config. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, int count, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index f99e2561dcba..aa0aa5d78171 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -145,7 +145,8 @@ List getExistContainerWithPipelinesInBatch( * Usually the count will be replace with a very big * value instead of being unlimited in case the db is very big) * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -164,7 +165,8 @@ Pair, Long> listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -183,7 +185,8 @@ Pair, Long> listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param factor Container factor - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ Pair, Long> listContainer(long startContainerID, @@ -204,7 +207,8 @@ Pair, Long> listContainer(long startContainerID, * value instead of being unlimited in case the db is very big) * @param state Container with this state will be returned. * @param replicationConfig Replication config for the containers - * @return a list of container. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
* @throws IOException */ Pair, Long> listContainer(long startContainerID, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index a83cd2de68b6..f26d15b37a88 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -408,7 +408,8 @@ private boolean hasRequiredReplicas(ContainerInfo contInfo) { * @param startContainerID start containerID. * @param count count must be {@literal >} 0. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -424,7 +425,8 @@ public Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -440,7 +442,8 @@ public Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param factor Container factor. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. * @throws IOException */ @Override @@ -525,7 +528,8 @@ public Pair, Long> listContainer(long startContainerID, * @param count count must be {@literal >} 0. * @param state Container with this state will be returned. * @param repConfig Replication Config for the container. - * @return a list of pipeline. + * @return a list of containers capped by max count allowed + * in "hdds.container.list.max.count" and total number of containers. 
From 5d83cbb89e45491edba2163d38f2d67e1e8861ec Mon Sep 17 00:00:00 2001
From: DaveTeng0
Date: Tue, 16 Apr 2024 12:13:05 -0700
Subject: [PATCH 6/7] Update command message when count is greater than the
 configured maxCountAllowed

---
 .../scm/cli/container/ListSubcommand.java     | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
index ec5eb71ee29c..cc5178b2c11e 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
@@ -58,9 +58,8 @@ public class ListSubcommand extends ScmSubcommand {
   private long startId;
 
   @Option(names = {"-c", "--count"},
-      description = "Maximum number of containers to list." +
-          "Make count=-1 default to list all containers",
-      defaultValue = "-1", showDefaultValue = Visibility.ALWAYS)
+      description = "Maximum number of containers to list.",
+      defaultValue = "20", showDefaultValue = Visibility.ALWAYS)
   private int count;
 
   @Option(names = {"-a", "--all"},
@@ -121,14 +120,22 @@ public void execute(ScmClient scmClient) throws IOException {
           replication, new OzoneConfiguration());
     }
 
+    int maxCountAllowed = parent.getParent().getOzoneConf()
+        .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT,
+            ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT);
     if (all) {
       System.out.printf("Attempting to list all containers." +
           " The total number of container might exceed" +
           " the cluster's current limit of %s. The results will be capped at the" +
           " maximum allowed count.%n", ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT);
-      count = parent.getParent().getOzoneConf()
-          .getInt(ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT,
-              ScmConfigKeys.HDDS_CONTAINER_LIST_MAX_COUNT_DEFAULT);
+      count = maxCountAllowed;
+    } else {
+      if (count > maxCountAllowed) {
+        System.out.printf("Attempting to list the first %d containers." +
+            " However, this exceeds the cluster's current limit of %d. The results" +
+            " will be capped at the maximum allowed count.%n", count, maxCountAllowed);
+        count = maxCountAllowed;
+      }
     }
 
     Pair<List<ContainerInfo>, Long> containerListAndTotalCount =
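
Aside (illustration only; not part of any patch in this series): the ListSubcommand change above resolves the effective count in three cases: --all lists up to the configured limit, an explicit -c/--count above the limit is capped with a message, and anything else is used as-is (the CLI default is 20). A self-contained sketch of that decision follows; the names EffectiveCount and resolve, and the example limit of 4096, are hypothetical and not taken from the Ozone code.

  // Hypothetical summary of the count resolution performed in ListSubcommand#execute:
  //   --all          -> use the configured cluster limit
  //   count > limit  -> warn and cap at the limit
  //   otherwise      -> use the requested count
  public final class EffectiveCount {
    private EffectiveCount() {
    }

    static int resolve(boolean all, int requestedCount, int maxCountAllowed) {
      if (all) {
        return maxCountAllowed;
      }
      if (requestedCount > maxCountAllowed) {
        System.out.printf("Capping requested count %d at the configured limit of %d%n",
            requestedCount, maxCountAllowed);
        return maxCountAllowed;
      }
      return requestedCount;
    }

    public static void main(String[] args) {
      // 4096 is only an example value, not the actual default of
      // "hdds.container.list.max.count".
      System.out.println(resolve(true, 20, 4096));    // 4096
      System.out.println(resolve(false, 9999, 4096)); // 4096 (capped, with a message)
      System.out.println(resolve(false, 20, 4096));   // 20
    }
  }
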
From b4bdd905ccb2a044768b4f10bf7122915048d771 Mon Sep 17 00:00:00 2001
From: DaveTeng0
Date: Thu, 2 May 2024 12:41:56 -0700
Subject: [PATCH 7/7] Refactor the listContainer method

---
 .../scm/server/SCMClientProtocolServer.java   | 188 +++++++-----------
 1 file changed, 73 insertions(+), 115 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index f26d15b37a88..fa2218ac2de5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -103,6 +103,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -451,63 +452,24 @@ public Pair<List<ContainerInfo>, Long> listContainer(long startContainerID,
   public Pair<List<ContainerInfo>, Long> listContainer(long startContainerID,
       int count, HddsProtos.LifeCycleState state,
       HddsProtos.ReplicationFactor factor) throws IOException {
+    return listContainerInternal(startContainerID, count, state, factor, null, null);
+  }
+
+  private Pair<List<ContainerInfo>, Long> listContainerInternal(long startContainerID, int count,
+      HddsProtos.LifeCycleState state,
+      HddsProtos.ReplicationFactor factor,
+      HddsProtos.ReplicationType replicationType,
+      ReplicationConfig repConfig) throws IOException {
     boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("startContainerID", String.valueOf(startContainerID));
-    auditMap.put("count", String.valueOf(count));
-    if (state != null) {
-      auditMap.put("state", state.name());
-    }
-    if (factor != null) {
-      auditMap.put("factor", factor.name());
-    }
+    Map<String, String> auditMap = buildAuditMap(startContainerID, count, state, factor, replicationType, repConfig);
+
     try {
-      final ContainerID containerId = ContainerID.valueOf(startContainerID);
-      if (state != null) {
-        if (factor != null) {
-          Stream<ContainerInfo> containerInfoStream =
-              scm.getContainerManager().getContainers(state).stream()
-                  .filter(info -> info.containerID().getId() >= startContainerID)
-                  //Filtering EC replication type as EC will not have factor.
-                  .filter(info -> info
-                      .getReplicationType() != HddsProtos.ReplicationType.EC)
-                  .filter(info -> (info.getReplicationFactor() == factor))
-                  .sorted();
-          List<ContainerInfo> containerInfos = containerInfoStream.collect(
-              Collectors.toList());
-          return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()),
-              (long)containerInfos.size());
-        } else {
-          Stream<ContainerInfo> containerInfoStream =
-              scm.getContainerManager().getContainers(state).stream()
-                  .filter(info -> info.containerID().getId() >= startContainerID)
-                  .sorted();
-          List<ContainerInfo> containerInfos = containerInfoStream.collect(
-              Collectors.toList());
-          return Pair.of(
-              containerInfos.stream().limit(count).collect(Collectors.toList()),
-              (long)containerInfos.size());
-        }
-      } else {
-        if (factor != null) {
-          Stream<ContainerInfo> containerInfoStream =
-              scm.getContainerManager().getContainers().stream()
-                  .filter(info -> info.containerID().getId() >= startContainerID)
-                  //Filtering EC replication type as EC will not have factor.
-                  .filter(info -> info
-                      .getReplicationType() != HddsProtos.ReplicationType.EC)
-                  .filter(info -> info.getReplicationFactor() == factor)
-                  .sorted();
-          List<ContainerInfo> containerInfos = containerInfoStream.collect(
-              Collectors.toList());
-          return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()),
-              (long)containerInfos.size());
-        } else {
-          List<ContainerInfo> containerInfos =
-              scm.getContainerManager().getContainers(containerId, count);
-          return Pair.of(containerInfos, (long)(containerInfos.size()));
-        }
-      }
+      Stream<ContainerInfo> containerStream =
+          buildContainerStream(factor, replicationType, repConfig, getBaseContainerStream(state));
+      List<ContainerInfo> containerInfos =
+          containerStream.filter(info -> info.containerID().getId() >= startContainerID)
+              .sorted().collect(Collectors.toList());
+      return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()), (long) containerInfos.size());
     } catch (Exception ex) {
       auditSuccess = false;
       AUDIT.logReadFailure(
           buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
       throw ex;
     } finally {
       if (auditSuccess) {
         AUDIT.logReadSuccess(
             buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
       }
     }
   }
 
-  /**
-   * Lists a range of containers and get their info.
-   *
-   * @param startContainerID start containerID.
-   * @param count count must be {@literal >} 0.
-   * @param state Container with this state will be returned.
-   * @param repConfig Replication Config for the container.
-   * @return a list of containers capped by max count allowed
-   * in "hdds.container.list.max.count" and total number of containers.
-   * @throws IOException
-   */
-  @Override
-  public Pair<List<ContainerInfo>, Long> listContainer(long startContainerID,
-      int count, HddsProtos.LifeCycleState state,
-      HddsProtos.ReplicationType replicationType,
-      ReplicationConfig repConfig) throws IOException {
-    boolean auditSuccess = true;
-    Map<String, String> auditMap = Maps.newHashMap();
+  private Stream<ContainerInfo> buildContainerStream(HddsProtos.ReplicationFactor factor,
+      HddsProtos.ReplicationType replicationType,
+      ReplicationConfig repConfig,
+      Stream<ContainerInfo> containerStream) {
+    if (factor != null) {
+      containerStream = containerStream.filter(info -> info.getReplicationType() != HddsProtos.ReplicationType.EC)
+          .filter(info -> info.getReplicationFactor() == factor);
+    } else if (repConfig != null) {
+      // If we have repConfig filter by it, as it includes repType too.
+      // Otherwise, we may have a filter just for repType, eg all EC containers
+      // without filtering on their replication scheme
+      containerStream = containerStream
+          .filter(info -> info.getReplicationConfig().equals(repConfig));
+    } else if (replicationType != null) {
+      containerStream = containerStream.filter(info -> info.getReplicationType() == replicationType);
+    }
+    return containerStream;
+  }
+
+  private Stream<ContainerInfo> getBaseContainerStream(HddsProtos.LifeCycleState state) {
+    if (state != null) {
+      return scm.getContainerManager().getContainers(state).stream();
+    } else {
+      return scm.getContainerManager().getContainers().stream();
+    }
+  }
+
+  private Map<String, String> buildAuditMap(long startContainerID, int count,
+      HddsProtos.LifeCycleState state,
+      HddsProtos.ReplicationFactor factor,
+      HddsProtos.ReplicationType replicationType,
+      ReplicationConfig repConfig) {
+    Map<String, String> auditMap = new HashMap<>();
     auditMap.put("startContainerID", String.valueOf(startContainerID));
     auditMap.put("count", String.valueOf(count));
     if (state != null) {
       auditMap.put("state", state.name());
     }
+    if (factor != null) {
+      auditMap.put("factor", factor.name());
+    }
     if (replicationType != null) {
       auditMap.put("replicationType", replicationType.toString());
     }
     if (repConfig != null) {
       auditMap.put("replicationConfig", repConfig.toString());
     }
-    try {
-      final ContainerID containerId = ContainerID.valueOf(startContainerID);
-      if (state == null && replicationType == null && repConfig == null) {
-        // Not filters, so just return everything
-        List<ContainerInfo> containerInfos =
-            scm.getContainerManager().getContainers(containerId, count);
-        return Pair.of(containerInfos, (long)containerInfos.size());
-      }
-      List<ContainerInfo> containerList;
-      if (state != null) {
-        containerList = scm.getContainerManager().getContainers(state);
-      } else {
-        containerList = scm.getContainerManager().getContainers();
-      }
+    return auditMap;
+  }
 
-      Stream<ContainerInfo> containerStream = containerList.stream()
-          .filter(info -> info.containerID().getId() >= startContainerID);
-      // If we have repConfig filter by it, as it includes repType too.
-      // Otherwise, we may have a filter just for repType, eg all EC containers
-      // without filtering on their replication scheme
-      if (repConfig != null) {
-        containerStream = containerStream
-            .filter(info -> info.getReplicationConfig().equals(repConfig));
-      } else if (replicationType != null) {
-        containerStream = containerStream
-            .filter(info -> info.getReplicationType() == replicationType);
-      }
-      Stream<ContainerInfo> containerInfoStream = containerStream.sorted();
-      List<ContainerInfo> containerInfos = containerInfoStream.collect(
-          Collectors.toList());
-      return Pair.of(containerInfos.stream().limit(count).collect(Collectors.toList()),
-          (long)containerInfos.size());
-    } catch (Exception ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex));
-      throw ex;
-    } finally {
-      if (auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap));
-      }
-    }
-  }
 
+  /**
+   * Lists a range of containers and get their info.
+   *
+   * @param startContainerID start containerID.
+   * @param count count must be {@literal >} 0.
+   * @param state Container with this state will be returned.
+   * @param repConfig Replication Config for the container.
+   * @return a list of containers capped by max count allowed
+   * in "hdds.container.list.max.count" and total number of containers.
+   * @throws IOException
+   */
+  @Override
+  public Pair<List<ContainerInfo>, Long> listContainer(long startContainerID,
+      int count, HddsProtos.LifeCycleState state,
+      HddsProtos.ReplicationType replicationType,
+      ReplicationConfig repConfig) throws IOException {
+    return listContainerInternal(startContainerID, count, state, null, replicationType, repConfig);
+  }
 
   @Override
   public void deleteContainer(long containerID) throws IOException {
     Map<String, String> auditMap = Maps.newHashMap();