diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 76bd2fed3027..3752deec5c52 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -321,18 +321,18 @@ public final class ScmConfigKeys {
       "ozone.scm.pipeline.owner.container.count";
   public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3;
   // Pipeline placement policy:
-  // the max number of pipelines can a single datanode be engaged in.
-  public static final String OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT =
-      "ozone.scm.datanode.max.pipeline.engagement";
-  // Setting to zero by default means this limit doesn't take effect.
-  public static final int OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT = 2;
+  // Upper limit for how many pipelines a datanode can engage in.
+  public static final String OZONE_DATANODE_PIPELINE_LIMIT =
+      "ozone.datanode.pipeline.limit";
+  public static final int OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT = 2;

-  // Upper limit for how many pipelines can be created.
+  // Upper limit for how many pipelines can be created
+  // across the cluster nodes managed by SCM.
   // Only for test purpose now.
-  public static final String OZONE_SCM_PIPELINE_NUMBER_LIMIT =
-      "ozone.scm.pipeline.number.limit";
+  public static final String OZONE_SCM_RATIS_PIPELINE_LIMIT =
+      "ozone.scm.ratis.pipeline.limit";
   // Setting to zero by default means this limit doesn't take effect.
-  public static final int OZONE_SCM_PIPELINE_NUMBER_LIMIT_DEFAULT = 0;
+  public static final int OZONE_SCM_RATIS_PIPELINE_LIMIT_DEFAULT = 0;

   public static final String
       OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY =
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 1dc23735a911..68494943b143 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;

 import java.io.IOException;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -57,7 +58,7 @@ public final class Pipeline {
   // Current reported Leader for the pipeline
   private UUID leaderId;
   // Timestamp for pipeline upon creation
-  private Long creationTimestamp;
+  private Instant creationTimestamp;
   // Only valid for Ratis THREE pipeline. No need to persist.
  private int nodeIdsHash;
@@ -74,7 +75,7 @@ private Pipeline(PipelineID id, ReplicationType type,
     this.factor = factor;
     this.state = state;
     this.nodeStatus = nodeStatus;
-    this.creationTimestamp = System.currentTimeMillis();
+    this.creationTimestamp = Instant.now();
     this.nodeIdsHash = 0;
   }

@@ -119,7 +120,7 @@ public PipelineState getPipelineState() {
    *
    * @return Creation Timestamp
    */
-  public Long getCreationTimestamp() {
+  public Instant getCreationTimestamp() {
     return creationTimestamp;
   }

@@ -128,7 +129,7 @@ public Long getCreationTimestamp() {
    *
    * @param creationTimestamp
    */
-  void setCreationTimestamp(Long creationTimestamp) {
+  void setCreationTimestamp(Instant creationTimestamp) {
     this.creationTimestamp = creationTimestamp;
   }

@@ -253,7 +254,7 @@ public HddsProtos.Pipeline getProtobufMessage()
         .setFactor(factor)
         .setState(PipelineState.getProtobuf(state))
         .setLeaderID(leaderId != null ? leaderId.toString() : "")
-        .setCreationTimeStamp(creationTimestamp)
+        .setCreationTimeStamp(creationTimestamp.toEpochMilli())
         .addAllMembers(nodeStatus.keySet().stream()
             .map(DatanodeDetails::getProtoBufMessage)
             .collect(Collectors.toList()));
@@ -289,6 +290,7 @@ public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline)
         .setNodes(pipeline.getMembersList().stream()
             .map(DatanodeDetails::getFromProtoBuf).collect(Collectors.toList()))
         .setNodesInOrder(pipeline.getMemberOrdersList())
+        .setCreateTimestamp(pipeline.getCreationTimeStamp())
         .build();
   }

@@ -357,7 +359,7 @@ public static class Builder {
     private List nodeOrder = null;
     private List nodesInOrder = null;
     private UUID leaderId = null;
-    private Long creationTimestamp = null;
+    private Instant creationTimestamp = null;
     private int nodeIdsHash = 0;

     public Builder() {}
@@ -410,6 +412,11 @@ public Builder setNodesInOrder(List<Integer> orders) {
       return this;
     }

+    public Builder setCreateTimestamp(long createTimestamp) {
+      this.creationTimestamp = Instant.ofEpochMilli(createTimestamp);
+      return this;
+    }
+
     public Builder setNodeIdsHash(int nodeIdsHash1) {
       this.nodeIdsHash = nodeIdsHash1;
       return this;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 3812aed2f14c..9f71ffec861a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -840,14 +840,14 @@
   <property>
-    <name>ozone.scm.datanode.max.pipeline.engagement</name>
+    <name>ozone.datanode.pipeline.limit</name>
     <value>2</value>
     <tag>OZONE, SCM, PIPELINE</tag>
     <description>Max number of pipelines a datanode can be engaged in.
     </description>
   </property>
   <property>
-    <name>ozone.scm.pipeline.number.limit</name>
+    <name>ozone.scm.ratis.pipeline.limit</name>
     <value>0</value>
     <tag>OZONE, SCM, PIPELINE</tag>
     <description>Upper limit for how many pipelines can be OPEN in SCM.
     </description>
   </property>
@@ -862,8 +862,9 @@
       Timeout for every pipeline to stay in ALLOCATED stage. When pipeline is
       created, it should be at OPEN stage once pipeline report is successfully
       received by SCM.
-      If a pipeline stays at ALLOCATED for too long, it should be scrubbed so that new
-      pipeline can be created. This timeout is for how long pipeline can stay at ALLOCATED
+      If a pipeline stays at ALLOCATED longer than the specified period of time,
+      it should be scrubbed so that a new pipeline can be created.
+      This timeout defines how long a pipeline can stay at the ALLOCATED
       stage until it gets scrubbed.
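For reference, both renamed keys are consumed the same way the old ones were. A minimal sketch of the lookup (assuming an OzoneConfiguration instance named conf), mirroring the RatisPipelineProvider change further down:

    // Per-datanode engagement cap; defaults to 2.
    int maxPipelinePerDatanode = conf.getInt(
        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT);
    // Cluster-wide RATIS pipeline cap; 0 means the limit is disabled.
    int ratisPipelineLimit = conf.getInt(
        ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
        ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT_DEFAULT);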
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
index 18809ed4450a..6533cb807642 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
@@ -57,8 +57,7 @@ public Set<PipelineID> getPipelines(UUID datanode) {
    * @return Number of pipelines or 0.
    */
   public int getPipelinesCount(UUID datanode) {
-    Set<PipelineID> pipelines = getObjects(datanode);
-    return pipelines == null ? 0 : pipelines.size();
+    return getObjects(datanode).size();
   }

   /**
@@ -80,7 +79,7 @@ public synchronized void removePipeline(Pipeline pipeline) {
       dn2ObjectMap.computeIfPresent(dnId,
           (k, v) -> {
             v.remove(pipeline.getId());
-            return v.isEmpty() ? null : v;
+            return v;
           });
     }
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 8c6e5c79b55e..4261a87c4c0d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -72,8 +72,8 @@ public PipelinePlacementPolicy(final NodeManager nodeManager,
     this.conf = conf;
     this.stateManager = stateManager;
     this.heavyNodeCriteria = conf.getInt(
-        ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
-        ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT);
+        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
+        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT);
   }

   /**
@@ -113,7 +113,7 @@ boolean meetCriteria(DatanodeDetails datanodeDetails, int nodesRequired) {
     }
     boolean meet = (nodeManager.getPipelinesCount(datanodeDetails)
         - pipelineNumDeductable) < heavyNodeCriteria;
-    if (!meet) {
+    if (!meet && LOG.isDebugEnabled()) {
       LOG.debug("Pipeline Placement: can't place more pipeline on heavy " +
           "datanode: " + datanodeDetails.getUuid().toString() +
           " Heaviness: " + nodeManager.getPipelinesCount(datanodeDetails) +
@@ -143,17 +143,11 @@ List<DatanodeDetails> filterViableNodes(
     }
     int initialHealthyNodesCount = healthyNodes.size();
     String msg;
-    if (initialHealthyNodesCount == 0) {
-      msg = "No healthy nodes found to allocate pipeline.";
-      LOG.error(msg);
-      throw new SCMException(msg, SCMException.ResultCodes
-          .FAILED_TO_FIND_SUITABLE_NODE);
-    }

     if (initialHealthyNodesCount < nodesRequired) {
-      LOG.warn("Not enough healthy nodes to allocate pipeline. %d "
-          + " datanodes required. Found %d",
-          nodesRequired, initialHealthyNodesCount);
+      LOG.warn("Not enough healthy nodes to allocate pipeline. "
+          + nodesRequired + " datanodes required. Found: "
+          + initialHealthyNodesCount);
       msg = String.format("Pipeline creation failed due to no sufficient"
           + " healthy datanodes. Required %d. Found %d.",
           nodesRequired, initialHealthyNodesCount);
@@ -168,15 +162,17 @@ List<DatanodeDetails> filterViableNodes(
         .collect(Collectors.toList());

     if (healthyList.size() < nodesRequired) {
-      LOG.debug("Unable to find enough nodes that meet " +
-          "the criteria that cannot engage in more than %d pipelines."
+ - " Nodes required: %d Found: %d, healthy nodes count in " + - "NodeManager: %d.", - heavyNodeCriteria, nodesRequired, healthyList.size(), - initialHealthyNodesCount); - msg = String.format("Pipeline creation failed due to not enough" + - " healthy datanodes after filter. Required %d. Found %d", - nodesRequired, initialHealthyNodesCount); + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to find enough nodes that meet the criteria that" + + " cannot engage in more than" + heavyNodeCriteria + + " pipelines. Nodes required: " + nodesRequired + " Found:" + + healthyList.size() + " healthy nodes count in NodeManager: " + + initialHealthyNodesCount); + } + msg = String.format("Pipeline creation failed because nodes are engaged" + + " in other pipelines and every node can only be engaged in" + + " max %d pipelines. Required %d. Found %d", + heavyNodeCriteria, nodesRequired, healthyList.size()); throw new SCMException(msg, SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 95859070f27d..4865074d7c70 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -84,11 +84,11 @@ public class RatisPipelineProvider implements PipelineProvider { this.placementPolicy = new PipelinePlacementPolicy(nodeManager, stateManager, conf); this.pipelineNumberLimit = conf.getInt( - ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT, - ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT_DEFAULT); + ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, + ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT_DEFAULT); this.maxPipelinePerDatanode = conf.getInt( - ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, - ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT); + ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, + ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT); } private List pickNodesNeverUsed(ReplicationFactor factor) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 366eabc42dda..8bdd6bb6790b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -113,27 +113,21 @@ static int encodeNodeIdsOfFactorThreePipeline(List nodes) { } /** - * Return first existed pipeline which share the same set of datanodes + * Return the list of pipelines who share the same set of datanodes * with the input pipeline. 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 95859070f27d..4865074d7c70 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -84,11 +84,11 @@ public class RatisPipelineProvider implements PipelineProvider {
     this.placementPolicy =
         new PipelinePlacementPolicy(nodeManager, stateManager, conf);
     this.pipelineNumberLimit = conf.getInt(
-        ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT,
-        ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT_DEFAULT);
+        ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
+        ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT_DEFAULT);
     this.maxPipelinePerDatanode = conf.getInt(
-        ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
-        ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT);
+        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
+        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT);
   }

   private List<DatanodeDetails> pickNodesNeverUsed(ReplicationFactor factor)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 366eabc42dda..8bdd6bb6790b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -113,27 +113,21 @@ static int encodeNodeIdsOfFactorThreePipeline(List<DatanodeDetails> nodes) {
   }

   /**
-   * Return first existed pipeline which share the same set of datanodes
+   * Return the list of pipelines that share the same set of datanodes
    * with the input pipeline.
    * @param stateManager PipelineStateManager
    * @param pipeline input pipeline
-   * @return first matched pipeline
+   * @return list of matched pipelines
    */
-  static Pipeline checkPipelineContainSameDatanodes(
+  static List<Pipeline> checkPipelineContainSameDatanodes(
       PipelineStateManager stateManager, Pipeline pipeline) {
-    List<Pipeline> matchedPipelines = stateManager.getPipelines(
+    return stateManager.getPipelines(
         HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.THREE)
         .stream().filter(p -> !p.getId().equals(pipeline.getId()) &&
-            (// For all OPEN or ALLOCATED pipelines
-            p.getPipelineState() == Pipeline.PipelineState.OPEN ||
-            p.getPipelineState() == Pipeline.PipelineState.ALLOCATED) &&
-            p.getNodeIdsHash() == pipeline.getNodeIdsHash())
+            (// For all non-CLOSED pipelines
+            p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
+            p.getNodeIdsHash() == pipeline.getNodeIdsHash()))
         .collect(Collectors.toList());
-    if (matchedPipelines.size() == 0) {
-      return null;
-    } else {
-      return matchedPipelines.stream().findFirst().get();
-    }
   }
 }
- LOG.info("Pipeline: " + pipeline.getId().toString() + - " contains same datanodes as previous pipeline: " + - overlapPipeline.getId().toString() + " nodeIds: " + - pipeline.getNodes().get(0).getUuid().toString() + - ", " + pipeline.getNodes().get(1).getUuid().toString() + - ", " + pipeline.getNodes().get(2).getUuid().toString()); + for (Pipeline overlapPipeline : overlapPipelines) { + LOG.info("Pipeline: " + pipeline.getId().toString() + + " contains same datanodes as previous pipelines: " + + overlapPipeline.getId().toString() + " nodeIds: " + + pipeline.getNodes().get(0).getUuid().toString() + + ", " + pipeline.getNodes().get(1).getUuid().toString() + + ", " + pipeline.getNodes().get(2).getUuid().toString()); + } } return pipeline; } catch (IOException ex) { @@ -373,20 +366,21 @@ public void scrubPipeline(ReplicationType type, ReplicationFactor factor) // Only srub pipeline for RATIS THREE pipeline return; } - Long currentTime = System.currentTimeMillis(); + Instant currentTime = Instant.now(); Long pipelineScrubTimeoutInMills = conf.getTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); List needToSrubPipelines = stateManager.getPipelines(type, factor, Pipeline.PipelineState.ALLOCATED).stream() - .filter(p -> (currentTime - p.getCreationTimestamp() - >= pipelineScrubTimeoutInMills)) + .filter(p -> currentTime.toEpochMilli() - p.getCreationTimestamp() + .toEpochMilli() >= pipelineScrubTimeoutInMills) .collect(Collectors.toList()); for (Pipeline p : needToSrubPipelines) { LOG.info("srubbing pipeline: id: " + p.getId().toString() + " since it stays at ALLOCATED stage for " + - (currentTime - p.getCreationTimestamp())/60000 + " mins."); + Duration.between(currentTime, p.getCreationTimestamp()).toMinutes() + + " mins."); finalizeAndDestroyPipeline(p, false); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index f35bfe2850eb..10c38a8fadcf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -68,7 +68,7 @@ public static void setUp() throws Exception { .getTestDir(TestCloseContainerEventHandler.class.getSimpleName()); configuration .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - configuration.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT, 16); + configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 16); nodeManager = new MockNodeManager(true, 10); eventQueue = new EventQueue(); pipelineManager = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 977038ebac71..4cdc46fa2222 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -66,7 +66,7 @@ import org.junit.Test; import org.mockito.Mockito; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; /** 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index f35bfe2850eb..10c38a8fadcf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -68,7 +68,7 @@ public static void setUp() throws Exception {
         .getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
     configuration
         .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    configuration.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT, 16);
+    configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 16);
     nodeManager = new MockNodeManager(true, 10);
     eventQueue = new EventQueue();
     pipelineManager =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 977038ebac71..4cdc46fa2222 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -66,7 +66,7 @@
 import org.junit.Test;
 import org.mockito.Mockito;

-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;

 /**
  * Test DeadNodeHandler.
@@ -89,7 +89,7 @@ public void setup() throws IOException, AuthenticationException {
     storageDir = GenericTestUtils.getTempPath(
         TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 0);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 0);
     eventQueue = new EventQueue();
     scm = HddsTestUtils.getScm(conf);
     nodeManager = (SCMNodeManager) scm.getScmNodeManager();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 87e8cf42cc05..41eea3d9dc67 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -34,8 +34,9 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.List;

-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;

 /**
@@ -77,7 +78,7 @@ public static Collection inputParams() {
   @Test
   public void testPipelineDatanodesIntersection() {
     NodeManager nodeManager= new MockNodeManager(true, nodeCount);
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, nodeHeaviness);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
     conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
     PipelineStateManager stateManager = new PipelineStateManager();
     PipelineProvider provider = new MockRatisPipelineProvider(nodeManager,
@@ -92,20 +93,23 @@ public void testPipelineDatanodesIntersection() {
         Pipeline pipeline = provider.create(HddsProtos.ReplicationFactor.THREE);
         stateManager.addPipeline(pipeline);
         nodeManager.addPipeline(pipeline);
-        Pipeline overlapPipeline = RatisPipelineUtils
+        List<Pipeline> overlapPipelines = RatisPipelineUtils
            .checkPipelineContainSameDatanodes(stateManager, pipeline);
-        if (overlapPipeline != null){
+
+        if (!overlapPipelines.isEmpty()) {
          intersectionCount++;
-          LOG.info("This pipeline: " + pipeline.getId().toString() +
-              " overlaps with previous pipeline: " + overlapPipeline.getId() +
-              ". They share same set of datanodes as: " +
-              pipeline.getNodesInOrder().get(0).getUuid() + "/" +
-              pipeline.getNodesInOrder().get(1).getUuid() + "/" +
-              pipeline.getNodesInOrder().get(2).getUuid() + " and " +
-              overlapPipeline.getNodesInOrder().get(0).getUuid() + "/" +
-              overlapPipeline.getNodesInOrder().get(1).getUuid() + "/" +
-              overlapPipeline.getNodesInOrder().get(2).getUuid() +
-              " is the same.");
+          for (Pipeline overlapPipeline : overlapPipelines) {
+            LOG.info("This pipeline: " + pipeline.getId().toString() +
+                " overlaps with previous pipeline: " + overlapPipeline.getId() +
+                ". They share same set of datanodes as: " +
+                pipeline.getNodesInOrder().get(0).getUuid() + "/" +
+                pipeline.getNodesInOrder().get(1).getUuid() + "/" +
+                pipeline.getNodesInOrder().get(2).getUuid() + " and " +
+                overlapPipeline.getNodesInOrder().get(0).getUuid() + "/" +
+                overlapPipeline.getNodesInOrder().get(1).getUuid() + "/" +
+                overlapPipeline.getNodesInOrder().get(2).getUuid() +
+                " is the same.");
+          }
         }
         createdPipelineCount++;
       } catch(SCMException e) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 2fe67f908597..2fff7d901cc7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -34,7 +34,7 @@
 import java.util.*;
 import java.util.stream.Collectors;

-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;

 /**
  * Test for PipelinePlacementPolicy.
@@ -50,7 +50,7 @@ public void init() throws Exception {
     nodeManager = new MockNodeManager(true,
         PIPELINE_PLACEMENT_MAX_NODES_COUNT);
     conf = new OzoneConfiguration();
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 5);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 5);
     placementPolicy = new PipelinePlacementPolicy(
         nodeManager, new PipelineStateManager(), conf);
   }
@@ -185,8 +185,8 @@ private void insertHeavyNodesIntoNodeManager(
     int considerHeavyCount = conf.getInt(
-        ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
-        ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT) + 1;
+        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
+        ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) + 1;

     Node2PipelineMap mockMap = new Node2PipelineMap();
     for (DatanodeDetails node : nodes) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 56233594e7c0..a17fc08466df 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -61,7 +61,7 @@ public class TestRatisPipelineProvider {
   public void init() throws Exception {
     nodeManager = new MockNodeManager(true, 10);
     conf = new OzoneConfiguration();
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
+    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
         maxPipelinePerNode);
     stateManager = new PipelineStateManager();
     provider = new MockRatisPipelineProvider(nodeManager,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index e6bf7a09d018..deba91b746a5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;

 import static org.apache.commons.collections.CollectionUtils.intersection;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
@@ -65,7 +65,7 @@ public class TestSCMPipelineManager {
   @Before
   public void setUp() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 1);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1);
     testDir = GenericTestUtils
         .getTestDir(TestSCMPipelineManager.class.getSimpleName());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index dcd8402fef47..badfadc22eb9 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.cli.datanode;

+import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -62,15 +63,15 @@ public class ListInfoSubcommand implements Callable<Void> {
   public Void call() throws Exception {
     try (ScmClient scmClient = parent.getParent().createScmClient()) {
       pipelines = scmClient.listPipelines();
-      if (isNullOrEmpty(ipaddress) && isNullOrEmpty(uuid)) {
+      if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) {
         getAllNodes(scmClient).stream().forEach(p -> printDatanodeInfo(p));
       } else {
         Stream<DatanodeDetails> allNodes = getAllNodes(scmClient).stream();
-        if (!isNullOrEmpty(ipaddress)) {
+        if (!Strings.isNullOrEmpty(ipaddress)) {
           allNodes = allNodes.filter(p -> p.getIpAddress()
               .compareToIgnoreCase(ipaddress) == 0);
         }
-        if (!isNullOrEmpty(uuid)) {
+        if (!Strings.isNullOrEmpty(uuid)) {
          allNodes = allNodes.filter(p -> p.getUuid().toString().equals(uuid));
         }
         allNodes.forEach(p -> printDatanodeInfo(p));
@@ -117,8 +118,4 @@ private void printDatanodeInfo(DatanodeDetails datanode) {
         + datanode.getHostName() + "/" + relatedPipelineNum +
         " pipelines) \n" + "Related pipelines: \n" + pipelineListInfo);
   }
-
-  protected static boolean isNullOrEmpty(String str) {
-    return ((str == null) || str.trim().isEmpty());
-  }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
index 8b3b1b3b8cbd..f8ac1d498759 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
@@ -18,6 +18,7 @@

 package org.apache.hadoop.hdds.scm.cli.pipeline;

+import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import picocli.CommandLine;
@@ -53,13 +54,13 @@ public class ListPipelinesSubcommand implements Callable<Void> {
   @Override
   public Void call() throws Exception {
     try (ScmClient scmClient = parent.getParent().createScmClient()) {
-      if (isNullOrEmpty(factor) && isNullOrEmpty(state)) {
+      if (Strings.isNullOrEmpty(factor) && Strings.isNullOrEmpty(state)) {
         scmClient.listPipelines().forEach(System.out::println);
       } else {
         scmClient.listPipelines().stream()
-            .filter(p -> ((isNullOrEmpty(factor) ||
+            .filter(p -> ((Strings.isNullOrEmpty(factor) ||
                 (p.getFactor().toString().compareToIgnoreCase(factor) == 0))
-                && (isNullOrEmpty(state) ||
+                && (Strings.isNullOrEmpty(state) ||
                 (p.getPipelineState().toString().compareToIgnoreCase(state)
                     == 0))))
             .forEach(System.out::println);
@@ -67,8 +68,4 @@ public Void call() throws Exception {
     return null;
   }
 }
-
-  protected static boolean isNullOrEmpty(String str) {
-    return ((str == null) || str.trim().isEmpty());
-  }
 }
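One subtlety of dropping the hand-rolled helper in both CLI classes: the old isNullOrEmpty trimmed before testing, while Guava's Strings.isNullOrEmpty checks only for null or the empty string, so a whitespace-only --factor, --state, ip, or uuid argument now reaches the filters. A short illustration of the difference:

    Strings.isNullOrEmpty(null);  // true
    Strings.isNullOrEmpty("");    // true
    Strings.isNullOrEmpty("  ");  // false -- the removed helper returned true here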
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index acc40317b0c2..7a6143cd48d9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -97,7 +97,7 @@ public void init() throws Exception {
     conf.setTimeDuration(
         OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
         LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 3);
+    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);

     OMStorage omStore = new OMStorage(conf);
     omStore.setClusterId(clusterId);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
index 0874f8be30eb..a81b4dfe5aae 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
@@ -36,7 +36,7 @@
 import java.util.concurrent.TimeoutException;

 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;

 /**
@@ -51,7 +51,7 @@ public class TestRatisPipelineCreateAndDestroy {
   public void init(int numDatanodes) throws Exception {
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         GenericTestUtils.getRandomizedTempPath());
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);

     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(numDatanodes)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 418e86d5b70f..e927e6da1b03 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -34,7 +34,7 @@
 import java.util.List;

 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+    .OZONE_DATANODE_PIPELINE_LIMIT;

 /**
  * Test for RatisPipelineProvider.
@@ -50,7 +50,7 @@ public void init() throws Exception {
     nodeManager = new MockNodeManager(true, 10);
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 1);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1);
     stateManager = new PipelineStateManager();
     provider = new MockRatisPipelineProvider(nodeManager,
         stateManager, conf);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index 09c633deeb1f..46751204ddc7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -38,7 +38,7 @@
 import java.util.List;
 import java.util.concurrent.TimeoutException;

-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.junit.Assert.fail;

 /**
@@ -63,7 +63,7 @@ public void setup(int numDatanodes) throws Exception {
         true);
     conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "10s");
     conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 50);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 50);

     clusterBuilder = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(numDatanodes)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index b0b51359f593..de61e102ea1d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -514,9 +514,9 @@ protected void initializeConfiguration() throws IOException {
     conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(),
         streamBufferSizeUnit.get());
     // MiniOzoneCluster should have global pipeline upper limit.
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT,
-        pipelineNumLimit == DEFAULT_PIPELIME_LIMIT ?
-            2 * numOfDatanodes : pipelineNumLimit);
+    conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
+        pipelineNumLimit >= DEFAULT_PIPELIME_LIMIT ?
+            pipelineNumLimit : DEFAULT_PIPELIME_LIMIT);
     configureTrace();
   }

@@ -528,7 +528,7 @@ void removeConfiguration() {
   /**
    * Creates a new StorageContainerManager instance.
    *
    * @return {@link StorageContainerManager}
-   *Wa
+   *
    * @throws IOException
    */
   protected StorageContainerManager createSCM()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
index a4c0ef459012..1adeb06e6c7c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
@@ -91,7 +91,7 @@ public void init() throws Exception {
     conf.setQuietMode(false);
     conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
         StorageUnit.MB);
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 3);
+    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);

     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7)
         .setTotalPipelineNumLimit(10).setBlockSize(blockSize)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index 1f2a6b5b2f6c..52c33e6eff73 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -56,7 +56,7 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;

 /**
  * Tests delete key operation with a slow follower in the datanode
@@ -100,7 +100,7 @@ public static void init() throws Exception {
         1000, TimeUnit.SECONDS);
     conf.setLong("hdds.scm.replication.thread.interval",
         containerReportInterval);
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
     conf.setQuietMode(false);

     cluster =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index d86ae5182a48..d5fdf1096214 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -99,7 +99,7 @@ private void init() throws Exception {
         1, TimeUnit.SECONDS);
     conf.setBoolean(
         OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
     conf.setQuietMode(false);

     conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index b60fe3ff1877..575ce851e1d5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -87,7 +87,7 @@ private void startCluster(int datanodes) throws Exception {
     conf.setTimeDuration(
         OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
         1, TimeUnit.SECONDS);
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
     conf.setQuietMode(false);

     cluster = MiniOzoneCluster.newBuilder(conf)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index 511315f1a631..1b044eb3430d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -57,7 +57,7 @@
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;

 /**
  * This class verifies the watchForCommit Handling by xceiverClient.
@@ -94,7 +94,7 @@ private void startCluster(OzoneConfiguration conf) throws Exception {
     conf.setTimeDuration(
         OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
         1, TimeUnit.SECONDS);
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 5);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 5);
     conf.setQuietMode(false);

     cluster = MiniOzoneCluster.newBuilder(conf)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 3cafb7d4c022..54a8f63e2870 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -52,7 +52,7 @@
 import java.util.List;
 import java.util.concurrent.TimeoutException;

-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;

 /**
  * Test container closing.
@@ -75,7 +75,7 @@ public class TestCloseContainerByPipeline {
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1");
-    conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);

     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(10)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
index 039c7a098eda..e8eb527fa726 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -36,7 +36,7 @@
 import org.apache.commons.lang3.RandomStringUtils;

-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
@@ -79,7 +79,7 @@ public void init() throws Exception {
     conf.setBoolean(OZONE_ACL_ENABLED, true);
     conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
     conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
-    conf.setInt(OZONE_SCM_PIPELINE_NUMBER_LIMIT, 10);
+    conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
index a882dcd3d666..0e57efa7d3f1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
@@ -77,7 +77,7 @@ public void setUp() throws Exception {
     conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 3);
+    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);

     cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(numOfDatanodes)
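Taken together, a typical test setup after the rename wires both limits before building the cluster. A sketch composed from the snippets above (the concrete values are illustrative):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Per-datanode engagement cap and cluster-wide RATIS pipeline cap.
    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
    conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 16);
    // build() may throw IOException while starting the mini cluster.
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(7)
        .build();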