From 72d0d69a7ffc3a4c9ad96d1c15624b5d8470d17d Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Thu, 30 Nov 2023 19:09:00 +0300 Subject: [PATCH 1/9] HDDS-9807. use the same algorithm for SCM to detect an hdds volume availability as on datanode to allocate a new container --- .../common/impl/StorageLocationReport.java | 25 +++++- .../StorageLocationReportMXBean.java | 2 + .../common/volume/AvailableSpaceFilter.java | 21 +++-- .../common/volume/MutableVolumeSet.java | 4 + .../ScmServerDatanodeHeartbeatProtocol.proto | 1 + .../hdds/scm/SCMCommonPlacementPolicy.java | 14 +++- .../hdds/scm/pipeline/PipelineProvider.java | 8 +- .../scm/pipeline/RatisPipelineProvider.java | 2 +- .../scm/TestSCMCommonPlacementPolicy.java | 78 ++++++++++++++++++- 9 files changed, 137 insertions(+), 18 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 0222050da5e5..8d707c858904 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -42,17 +42,20 @@ public final class StorageLocationReport implements private final long capacity; private final long scmUsed; private final long remaining; + private final long committed; private final StorageType storageType; private final String storageLocation; + @SuppressWarnings("checkstyle:parameternumber") private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, StorageType storageType, + long scmUsed, long remaining, long committed, StorageType storageType, String storageLocation) { this.id = id; this.failed = failed; this.capacity = capacity; this.scmUsed = scmUsed; this.remaining = remaining; + 
this.committed = committed; this.storageType = storageType; this.storageLocation = storageLocation; } @@ -82,6 +85,11 @@ public long getRemaining() { return remaining; } + @Override + public long getCommitted() { + return committed; + } + @Override public String getStorageLocation() { return storageLocation; @@ -162,6 +170,7 @@ public StorageReportProto getProtoBufMessage() throws IOException { .setCapacity(getCapacity()) .setScmUsed(getScmUsed()) .setRemaining(getRemaining()) + .setCommitted(getCommitted()) .setStorageType(getStorageTypeProto()) .setStorageLocation(getStorageLocation()) .setFailed(isFailed()) @@ -266,6 +275,7 @@ public static class Builder { private long capacity; private long scmUsed; private long remaining; + private long committed; private StorageType storageType; private String storageLocation; @@ -334,6 +344,17 @@ public Builder setStorageType(StorageType storageTypeValue) { return this; } + /** + * Sets the committed bytes count. + * (bytes for previously created containers) + * @param committed previously created containers size + * @return StorageLocationReport.Builder + */ + public Builder setCommitted(long committed) { + this.committed = committed; + return this; + } + /** * Sets the storageLocation. 
* @@ -352,7 +373,7 @@ public Builder setStorageLocation(String storageLocationValue) { */ public StorageLocationReport build() { return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, storageType, storageLocation); + remaining, committed, storageType, storageLocation); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java index fd063678137d..707593bb7d89 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java @@ -33,6 +33,8 @@ public interface StorageLocationReportMXBean { long getRemaining(); + long getCommitted(); + String getStorageLocation(); String getStorageTypeName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 13041eb4d662..3ee33b75ec98 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ozone.container.common.volume; +import org.apache.hadoop.hdds.conf.ConfigurationSource; + import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; @@ -25,7 +27,7 @@ * Filter for selecting volumes with enough space for a new container. * Keeps track of ineligible volumes for logging/debug purposes. 
*/ -class AvailableSpaceFilter implements Predicate { +public class AvailableSpaceFilter implements Predicate { private final long requiredSpace; private final Map fullVolumes = @@ -42,10 +44,8 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - committed; - long volumeFreeSpace = - VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); - boolean hasEnoughSpace = - available > Math.max(requiredSpace, volumeFreeSpace); + boolean hasEnoughSpace = hasVolumeEnoughSpace(volumeCapacity, free, + committed, requiredSpace, vol.getConf()); mostAvailableSpace = Math.max(available, mostAvailableSpace); @@ -70,6 +70,17 @@ public String toString() { ", volumes: " + fullVolumes; } + public static boolean hasVolumeEnoughSpace(long volumeCapacity, + long volumeAvailableSpace, + long volumeCommittedBytesCount, + long requiredSpace, + ConfigurationSource conf) { + long volumeFreeSpace = + VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity); + return (volumeAvailableSpace - volumeCommittedBytesCount) > + Math.max(requiredSpace, volumeFreeSpace); + } + private static class AvailableSpace { private final long free; private final long committed; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 985ddea8deb8..3c0b6e618ee1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -464,6 +464,7 @@ public StorageLocationReport[] getStorageReport() { long scmUsed = 0; long remaining = 0; long capacity = 0; + long committed = 0; String rootDir = ""; failed = true; if (volumeInfo.isPresent()) { @@ -472,6 +473,8 
@@ public StorageLocationReport[] getStorageReport() { scmUsed = volumeInfo.get().getScmUsed(); remaining = volumeInfo.get().getAvailable(); capacity = volumeInfo.get().getCapacity(); + committed = (volume instanceof HddsVolume) ? + ((HddsVolume) volume).getCommittedBytes() : 0; failed = false; } catch (UncheckedIOException ex) { LOG.warn("Failed to get scmUsed and remaining for container " + @@ -491,6 +494,7 @@ public StorageLocationReport[] getStorageReport() { .setCapacity(capacity) .setRemaining(remaining) .setScmUsed(scmUsed) + .setCommitted(committed) .setStorageType(volume.getStorageType()); StorageLocationReport r = builder.build(); reports[counter++] = r; diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index de9e39789b51..973ab736a68d 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -179,6 +179,7 @@ message StorageReportProto { optional uint64 remaining = 5 [default = 0]; optional StorageTypeProto storageType = 6 [default = DISK]; optional bool failed = 7 [default = false]; + optional uint64 committed = 8 [default = 0]; } message MetadataStorageReportProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 90140c44810b..906ebf324967 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -51,6 +51,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static 
org.apache.hadoop.ozone.container.common.volume.AvailableSpaceFilter.hasVolumeEnoughSpace; + /** * This policy implements a set of invariants which are common * for all basic placement policies, acts as the repository of helper @@ -274,7 +276,7 @@ public List filterNodesWithSpace(List nodes, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List nodesWithSpace = nodes.stream().filter(d -> - hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired)) + hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired, conf)) .collect(Collectors.toList()); if (nodesWithSpace.size() < nodesRequired) { @@ -298,7 +300,9 @@ public List filterNodesWithSpace(List nodes, * @return true if we have enough space. */ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long metadataSizeRequired, long dataSizeRequired) { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) { Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo); boolean enoughForData = false; @@ -308,7 +312,9 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, if (dataSizeRequired > 0) { for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) { - if (reportProto.getRemaining() > dataSizeRequired) { + if (hasVolumeEnoughSpace(reportProto.getCapacity(), + reportProto.getRemaining(), reportProto.getCommitted(), + dataSizeRequired, conf)) { enoughForData = true; break; } @@ -494,7 +500,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus = datanodeInfo.getNodeStatus(); if (nodeStatus.isNodeWritable() && (hasEnoughSpace(datanodeInfo, metadataSizeRequired, - dataSizeRequired))) { + dataSizeRequired, conf))) { LOG.debug("Datanode {} is chosen. 
Required metadata size is {} and " + "required data size is {} and NodeStatus is {}", datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index 01d3e71150d3..c59128286917 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -85,12 +86,15 @@ protected abstract Pipeline createForRead( protected abstract void shutdown(); List pickNodesNotUsed(REPLICATION_CONFIG replicationConfig, - long metadataSizeRequired, long dataSizeRequired) throws SCMException { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) + throws SCMException { int nodesRequired = replicationConfig.getRequiredNodes(); List healthyDNs = pickAllNodesNotUsed(replicationConfig); List healthyDNsWithSpace = healthyDNs.stream() .filter(dn -> SCMCommonPlacementPolicy - .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired)) + .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired, conf)) .limit(nodesRequired) .collect(Collectors.toList()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 1b62120c1ee7..8336bce5eae7 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -163,7 +163,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, switch (factor) { case ONE: dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes, - containerSizeBytes); + containerSizeBytes, conf); break; case THREE: List excludeDueToEngagement = filterPipelineEngagement(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index ffefc7c5f5db..5c84dfa426cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -23,31 +23,44 @@ import com.google.common.collect.Sets; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; -import org.mockito.Mockito; - import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + import java.util.function.Function; import java.util.stream.Stream; @@ -448,11 +461,68 @@ protected List chooseDatanodesInternal( } }; dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1); - Assertions.assertFalse(usedNodesIdentity.get()); + assertFalse(usedNodesIdentity.get()); dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1); Assertions.assertTrue(usedNodesIdentity.get()); } + @Test + public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { + NodeManager nodeMngr = mock(NodeManager.class); + ConfigurationSource confing = mock(ConfigurationSource.class); + when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE))) + .thenReturn(true); + when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE), + eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT), + eq(StorageUnit.BYTES))).thenReturn(100000.0); + UUID datanodeUuid = spy(UUID.randomUUID()); + DummyPlacementPolicy placementPolicy = + new DummyPlacementPolicy(nodeMngr, confing, 1); + DatanodeDetails datanodeDetails = 
mock(DatanodeDetails.class); + when(datanodeDetails.getUuid()).thenReturn(datanodeUuid); + + DatanodeInfo datanodeInfo = mock(DatanodeInfo.class); + NodeStatus nodeStatus = mock(NodeStatus.class); + when(nodeStatus.isNodeWritable()).thenReturn(true); + when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus); + when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo); + + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = + StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() + .setCommitted(500) + .setCapacity(200000) + .setRemaining(101000) + .setStorageUuid(UUID.randomUUID().toString()) + .setStorageLocation("/data/hdds") + .build(); + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = + StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() + .setCommitted(1000) + .setCapacity(200000) + .setRemaining(101000) + .setStorageUuid(UUID.randomUUID().toString()) + .setStorageLocation("/data/hdds") + .build(); + StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto + metaReport = + StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto + .newBuilder() + .setRemaining(200) + .setStorageLocation("/data/metadata") + .build(); + when(datanodeInfo.getStorageReports()) + .thenReturn(Collections.singletonList(storageReport1)) + .thenReturn(Collections.singletonList(storageReport2)); + when(datanodeInfo.getMetadataStorageReports()) + .thenReturn(Collections.singletonList(metaReport)); + + + // 500 committed bytes + assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + // 1000 committed bytes + assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + } + private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { private Map rackMap; private List racks; @@ -485,7 +555,7 @@ private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { super(nodeManager, conf); this.rackCnt = rackCnt; this.racks = 
IntStream.range(0, rackCnt) - .mapToObj(i -> Mockito.mock(Node.class)).collect(Collectors.toList()); + .mapToObj(i -> mock(Node.class)).collect(Collectors.toList()); List datanodeDetails = nodeManager.getAllNodes(); rackMap = datanodeRackMap.entrySet().stream() .collect(Collectors.toMap( From cfcc024f1f6bb8bc936012bcd1ccf1c9535529dc Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Wed, 6 Dec 2023 11:52:11 +0300 Subject: [PATCH 2/9] HDDS-9807. update remaining volume size (- committedBytes) instead of providing the committed bytes metric separately --- .../common/impl/StorageLocationReport.java | 25 +----- .../StorageLocationReportMXBean.java | 2 - .../common/volume/AvailableSpaceFilter.java | 21 ++--- .../common/volume/MutableVolumeSet.java | 7 +- .../ScmServerDatanodeHeartbeatProtocol.proto | 1 - .../hdds/scm/SCMCommonPlacementPolicy.java | 14 +--- .../hdds/scm/pipeline/PipelineProvider.java | 8 +- .../scm/pipeline/RatisPipelineProvider.java | 2 +- .../scm/TestSCMCommonPlacementPolicy.java | 78 +------------------ 9 files changed, 21 insertions(+), 137 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 8d707c858904..0222050da5e5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -42,20 +42,17 @@ public final class StorageLocationReport implements private final long capacity; private final long scmUsed; private final long remaining; - private final long committed; private final StorageType storageType; private final String storageLocation; - @SuppressWarnings("checkstyle:parameternumber") private StorageLocationReport(String id, boolean failed, 
long capacity, - long scmUsed, long remaining, long committed, StorageType storageType, + long scmUsed, long remaining, StorageType storageType, String storageLocation) { this.id = id; this.failed = failed; this.capacity = capacity; this.scmUsed = scmUsed; this.remaining = remaining; - this.committed = committed; this.storageType = storageType; this.storageLocation = storageLocation; } @@ -85,11 +82,6 @@ public long getRemaining() { return remaining; } - @Override - public long getCommitted() { - return committed; - } - @Override public String getStorageLocation() { return storageLocation; @@ -170,7 +162,6 @@ public StorageReportProto getProtoBufMessage() throws IOException { .setCapacity(getCapacity()) .setScmUsed(getScmUsed()) .setRemaining(getRemaining()) - .setCommitted(getCommitted()) .setStorageType(getStorageTypeProto()) .setStorageLocation(getStorageLocation()) .setFailed(isFailed()) @@ -275,7 +266,6 @@ public static class Builder { private long capacity; private long scmUsed; private long remaining; - private long committed; private StorageType storageType; private String storageLocation; @@ -344,17 +334,6 @@ public Builder setStorageType(StorageType storageTypeValue) { return this; } - /** - * Sets the committed bytes count. - * (bytes for previously created containers) - * @param committed previously created containers size - * @return StorageLocationReport.Builder - */ - public Builder setCommitted(long committed) { - this.committed = committed; - return this; - } - /** * Sets the storageLocation. 
* @@ -373,7 +352,7 @@ public Builder setStorageLocation(String storageLocationValue) { */ public StorageLocationReport build() { return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, committed, storageType, storageLocation); + remaining, storageType, storageLocation); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java index 707593bb7d89..fd063678137d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java @@ -33,8 +33,6 @@ public interface StorageLocationReportMXBean { long getRemaining(); - long getCommitted(); - String getStorageLocation(); String getStorageTypeName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 3ee33b75ec98..13041eb4d662 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.ozone.container.common.volume; -import org.apache.hadoop.hdds.conf.ConfigurationSource; - import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; @@ -27,7 +25,7 @@ * Filter for selecting volumes with enough space for a new container. * Keeps track of ineligible volumes for logging/debug purposes. 
*/ -public class AvailableSpaceFilter implements Predicate { +class AvailableSpaceFilter implements Predicate { private final long requiredSpace; private final Map fullVolumes = @@ -44,8 +42,10 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - committed; - boolean hasEnoughSpace = hasVolumeEnoughSpace(volumeCapacity, free, - committed, requiredSpace, vol.getConf()); + long volumeFreeSpace = + VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); + boolean hasEnoughSpace = + available > Math.max(requiredSpace, volumeFreeSpace); mostAvailableSpace = Math.max(available, mostAvailableSpace); @@ -70,17 +70,6 @@ public String toString() { ", volumes: " + fullVolumes; } - public static boolean hasVolumeEnoughSpace(long volumeCapacity, - long volumeAvailableSpace, - long volumeCommittedBytesCount, - long requiredSpace, - ConfigurationSource conf) { - long volumeFreeSpace = - VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity); - return (volumeAvailableSpace - volumeCommittedBytesCount) > - Math.max(requiredSpace, volumeFreeSpace); - } - private static class AvailableSpace { private final long free; private final long committed; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 3c0b6e618ee1..e78833c8a3e5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -464,7 +464,6 @@ public StorageLocationReport[] getStorageReport() { long scmUsed = 0; long remaining = 0; long capacity = 0; - long committed = 0; String rootDir = ""; failed = true; if (volumeInfo.isPresent()) { @@ -473,8 +472,9 
@@ public StorageLocationReport[] getStorageReport() { scmUsed = volumeInfo.get().getScmUsed(); remaining = volumeInfo.get().getAvailable(); capacity = volumeInfo.get().getCapacity(); - committed = (volume instanceof HddsVolume) ? - ((HddsVolume) volume).getCommittedBytes() : 0; + if (volume instanceof HddsVolume) { + remaining -= ((HddsVolume)volume).getCommittedBytes(); + } failed = false; } catch (UncheckedIOException ex) { LOG.warn("Failed to get scmUsed and remaining for container " + @@ -494,7 +494,6 @@ public StorageLocationReport[] getStorageReport() { .setCapacity(capacity) .setRemaining(remaining) .setScmUsed(scmUsed) - .setCommitted(committed) .setStorageType(volume.getStorageType()); StorageLocationReport r = builder.build(); reports[counter++] = r; diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index 973ab736a68d..de9e39789b51 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -179,7 +179,6 @@ message StorageReportProto { optional uint64 remaining = 5 [default = 0]; optional StorageTypeProto storageType = 6 [default = DISK]; optional bool failed = 7 [default = false]; - optional uint64 committed = 8 [default = 0]; } message MetadataStorageReportProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 906ebf324967..90140c44810b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -51,8 +51,6 @@ import java.util.function.Function; import java.util.stream.Collectors; 
-import static org.apache.hadoop.ozone.container.common.volume.AvailableSpaceFilter.hasVolumeEnoughSpace; - /** * This policy implements a set of invariants which are common * for all basic placement policies, acts as the repository of helper @@ -276,7 +274,7 @@ public List filterNodesWithSpace(List nodes, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List nodesWithSpace = nodes.stream().filter(d -> - hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired, conf)) + hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired)) .collect(Collectors.toList()); if (nodesWithSpace.size() < nodesRequired) { @@ -300,9 +298,7 @@ public List filterNodesWithSpace(List nodes, * @return true if we have enough space. */ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long metadataSizeRequired, - long dataSizeRequired, - ConfigurationSource conf) { + long metadataSizeRequired, long dataSizeRequired) { Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo); boolean enoughForData = false; @@ -312,9 +308,7 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, if (dataSizeRequired > 0) { for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) { - if (hasVolumeEnoughSpace(reportProto.getCapacity(), - reportProto.getRemaining(), reportProto.getCommitted(), - dataSizeRequired, conf)) { + if (reportProto.getRemaining() > dataSizeRequired) { enoughForData = true; break; } @@ -500,7 +494,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus = datanodeInfo.getNodeStatus(); if (nodeStatus.isNodeWritable() && (hasEnoughSpace(datanodeInfo, metadataSizeRequired, - dataSizeRequired, conf))) { + dataSizeRequired))) { LOG.debug("Datanode {} is chosen. 
Required metadata size is {} and " + "required data size is {} and NodeStatus is {}", datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index c59128286917..01d3e71150d3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -25,7 +25,6 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -86,15 +85,12 @@ protected abstract Pipeline createForRead( protected abstract void shutdown(); List pickNodesNotUsed(REPLICATION_CONFIG replicationConfig, - long metadataSizeRequired, - long dataSizeRequired, - ConfigurationSource conf) - throws SCMException { + long metadataSizeRequired, long dataSizeRequired) throws SCMException { int nodesRequired = replicationConfig.getRequiredNodes(); List healthyDNs = pickAllNodesNotUsed(replicationConfig); List healthyDNsWithSpace = healthyDNs.stream() .filter(dn -> SCMCommonPlacementPolicy - .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired, conf)) + .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired)) .limit(nodesRequired) .collect(Collectors.toList()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 8336bce5eae7..1b62120c1ee7 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -163,7 +163,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, switch (factor) { case ONE: dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes, - containerSizeBytes, conf); + containerSizeBytes); break; case THREE: List excludeDueToEngagement = filterPipelineEngagement(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index 5c84dfa426cf..ffefc7c5f5db 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -23,44 +23,31 @@ import com.google.common.collect.Sets; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.Node; -import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; +import org.mockito.Mockito; + import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; - import java.util.function.Function; import java.util.stream.Stream; @@ -461,68 +448,11 @@ protected List chooseDatanodesInternal( } }; dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1); - assertFalse(usedNodesIdentity.get()); + Assertions.assertFalse(usedNodesIdentity.get()); dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1); Assertions.assertTrue(usedNodesIdentity.get()); } - @Test - public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { - NodeManager nodeMngr = mock(NodeManager.class); - ConfigurationSource confing = mock(ConfigurationSource.class); - when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE))) - .thenReturn(true); - when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE), - eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT), - eq(StorageUnit.BYTES))).thenReturn(100000.0); - UUID datanodeUuid = spy(UUID.randomUUID()); - DummyPlacementPolicy placementPolicy = - new DummyPlacementPolicy(nodeMngr, confing, 1); - DatanodeDetails datanodeDetails = 
mock(DatanodeDetails.class); - when(datanodeDetails.getUuid()).thenReturn(datanodeUuid); - - DatanodeInfo datanodeInfo = mock(DatanodeInfo.class); - NodeStatus nodeStatus = mock(NodeStatus.class); - when(nodeStatus.isNodeWritable()).thenReturn(true); - when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus); - when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo); - - StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = - StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() - .setCommitted(500) - .setCapacity(200000) - .setRemaining(101000) - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation("/data/hdds") - .build(); - StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = - StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() - .setCommitted(1000) - .setCapacity(200000) - .setRemaining(101000) - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation("/data/hdds") - .build(); - StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto - metaReport = - StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto - .newBuilder() - .setRemaining(200) - .setStorageLocation("/data/metadata") - .build(); - when(datanodeInfo.getStorageReports()) - .thenReturn(Collections.singletonList(storageReport1)) - .thenReturn(Collections.singletonList(storageReport2)); - when(datanodeInfo.getMetadataStorageReports()) - .thenReturn(Collections.singletonList(metaReport)); - - - // 500 committed bytes - assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); - // 1000 committed bytes - assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); - } - private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { private Map rackMap; private List racks; @@ -555,7 +485,7 @@ private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { super(nodeManager, conf); this.rackCnt = rackCnt; this.racks = 
IntStream.range(0, rackCnt) - .mapToObj(i -> mock(Node.class)).collect(Collectors.toList()); + .mapToObj(i -> Mockito.mock(Node.class)).collect(Collectors.toList()); List datanodeDetails = nodeManager.getAllNodes(); rackMap = datanodeRackMap.entrySet().stream() .collect(Collectors.toMap( From db1305cba7e3d9aa05781dbbe2cc14f8ce707bc8 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Thu, 7 Dec 2023 21:34:21 +0300 Subject: [PATCH 3/9] Revert "HDDS-9807. update remaining volume size (- committedBytes) instead of providing the committed bytes metric separately" This reverts commit cfcc024f1f6bb8bc936012bcd1ccf1c9535529dc. --- .../common/impl/StorageLocationReport.java | 25 +++++- .../StorageLocationReportMXBean.java | 2 + .../common/volume/AvailableSpaceFilter.java | 21 +++-- .../common/volume/MutableVolumeSet.java | 7 +- .../ScmServerDatanodeHeartbeatProtocol.proto | 1 + .../hdds/scm/SCMCommonPlacementPolicy.java | 14 +++- .../hdds/scm/pipeline/PipelineProvider.java | 8 +- .../scm/pipeline/RatisPipelineProvider.java | 2 +- .../scm/TestSCMCommonPlacementPolicy.java | 78 ++++++++++++++++++- 9 files changed, 137 insertions(+), 21 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 0222050da5e5..8d707c858904 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -42,17 +42,20 @@ public final class StorageLocationReport implements private final long capacity; private final long scmUsed; private final long remaining; + private final long committed; private final StorageType storageType; private final String storageLocation; + 
@SuppressWarnings("checkstyle:parameternumber") private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, StorageType storageType, + long scmUsed, long remaining, long committed, StorageType storageType, String storageLocation) { this.id = id; this.failed = failed; this.capacity = capacity; this.scmUsed = scmUsed; this.remaining = remaining; + this.committed = committed; this.storageType = storageType; this.storageLocation = storageLocation; } @@ -82,6 +85,11 @@ public long getRemaining() { return remaining; } + @Override + public long getCommitted() { + return committed; + } + @Override public String getStorageLocation() { return storageLocation; @@ -162,6 +170,7 @@ public StorageReportProto getProtoBufMessage() throws IOException { .setCapacity(getCapacity()) .setScmUsed(getScmUsed()) .setRemaining(getRemaining()) + .setCommitted(getCommitted()) .setStorageType(getStorageTypeProto()) .setStorageLocation(getStorageLocation()) .setFailed(isFailed()) @@ -266,6 +275,7 @@ public static class Builder { private long capacity; private long scmUsed; private long remaining; + private long committed; private StorageType storageType; private String storageLocation; @@ -334,6 +344,17 @@ public Builder setStorageType(StorageType storageTypeValue) { return this; } + /** + * Sets the committed bytes count. + * (bytes for previously created containers) + * @param committed previously created containers size + * @return StorageLocationReport.Builder + */ + public Builder setCommitted(long committed) { + this.committed = committed; + return this; + } + /** * Sets the storageLocation. 
* @@ -352,7 +373,7 @@ public Builder setStorageLocation(String storageLocationValue) { */ public StorageLocationReport build() { return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, storageType, storageLocation); + remaining, committed, storageType, storageLocation); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java index fd063678137d..707593bb7d89 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java @@ -33,6 +33,8 @@ public interface StorageLocationReportMXBean { long getRemaining(); + long getCommitted(); + String getStorageLocation(); String getStorageTypeName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 13041eb4d662..3ee33b75ec98 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ozone.container.common.volume; +import org.apache.hadoop.hdds.conf.ConfigurationSource; + import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; @@ -25,7 +27,7 @@ * Filter for selecting volumes with enough space for a new container. * Keeps track of ineligible volumes for logging/debug purposes. 
*/ -class AvailableSpaceFilter implements Predicate { +public class AvailableSpaceFilter implements Predicate { private final long requiredSpace; private final Map fullVolumes = @@ -42,10 +44,8 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - committed; - long volumeFreeSpace = - VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); - boolean hasEnoughSpace = - available > Math.max(requiredSpace, volumeFreeSpace); + boolean hasEnoughSpace = hasVolumeEnoughSpace(volumeCapacity, free, + committed, requiredSpace, vol.getConf()); mostAvailableSpace = Math.max(available, mostAvailableSpace); @@ -70,6 +70,17 @@ public String toString() { ", volumes: " + fullVolumes; } + public static boolean hasVolumeEnoughSpace(long volumeCapacity, + long volumeAvailableSpace, + long volumeCommittedBytesCount, + long requiredSpace, + ConfigurationSource conf) { + long volumeFreeSpace = + VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity); + return (volumeAvailableSpace - volumeCommittedBytesCount) > + Math.max(requiredSpace, volumeFreeSpace); + } + private static class AvailableSpace { private final long free; private final long committed; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index e78833c8a3e5..3c0b6e618ee1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -464,6 +464,7 @@ public StorageLocationReport[] getStorageReport() { long scmUsed = 0; long remaining = 0; long capacity = 0; + long committed = 0; String rootDir = ""; failed = true; if (volumeInfo.isPresent()) { @@ -472,9 +473,8 
@@ public StorageLocationReport[] getStorageReport() { scmUsed = volumeInfo.get().getScmUsed(); remaining = volumeInfo.get().getAvailable(); capacity = volumeInfo.get().getCapacity(); - if (volume instanceof HddsVolume) { - remaining -= ((HddsVolume)volume).getCommittedBytes(); - } + committed = (volume instanceof HddsVolume) ? + ((HddsVolume) volume).getCommittedBytes() : 0; failed = false; } catch (UncheckedIOException ex) { LOG.warn("Failed to get scmUsed and remaining for container " + @@ -494,6 +494,7 @@ public StorageLocationReport[] getStorageReport() { .setCapacity(capacity) .setRemaining(remaining) .setScmUsed(scmUsed) + .setCommitted(committed) .setStorageType(volume.getStorageType()); StorageLocationReport r = builder.build(); reports[counter++] = r; diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index de9e39789b51..973ab736a68d 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -179,6 +179,7 @@ message StorageReportProto { optional uint64 remaining = 5 [default = 0]; optional StorageTypeProto storageType = 6 [default = DISK]; optional bool failed = 7 [default = false]; + optional uint64 committed = 8 [default = 0]; } message MetadataStorageReportProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 90140c44810b..906ebf324967 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -51,6 +51,8 @@ import java.util.function.Function; import java.util.stream.Collectors; 
+import static org.apache.hadoop.ozone.container.common.volume.AvailableSpaceFilter.hasVolumeEnoughSpace; + /** * This policy implements a set of invariants which are common * for all basic placement policies, acts as the repository of helper @@ -274,7 +276,7 @@ public List filterNodesWithSpace(List nodes, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { List nodesWithSpace = nodes.stream().filter(d -> - hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired)) + hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired, conf)) .collect(Collectors.toList()); if (nodesWithSpace.size() < nodesRequired) { @@ -298,7 +300,9 @@ public List filterNodesWithSpace(List nodes, * @return true if we have enough space. */ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long metadataSizeRequired, long dataSizeRequired) { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) { Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo); boolean enoughForData = false; @@ -308,7 +312,9 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, if (dataSizeRequired > 0) { for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) { - if (reportProto.getRemaining() > dataSizeRequired) { + if (hasVolumeEnoughSpace(reportProto.getCapacity(), + reportProto.getRemaining(), reportProto.getCommitted(), + dataSizeRequired, conf)) { enoughForData = true; break; } @@ -494,7 +500,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus = datanodeInfo.getNodeStatus(); if (nodeStatus.isNodeWritable() && (hasEnoughSpace(datanodeInfo, metadataSizeRequired, - dataSizeRequired))) { + dataSizeRequired, conf))) { LOG.debug("Datanode {} is chosen. 
Required metadata size is {} and " + "required data size is {} and NodeStatus is {}", datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index 01d3e71150d3..c59128286917 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -85,12 +86,15 @@ protected abstract Pipeline createForRead( protected abstract void shutdown(); List pickNodesNotUsed(REPLICATION_CONFIG replicationConfig, - long metadataSizeRequired, long dataSizeRequired) throws SCMException { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) + throws SCMException { int nodesRequired = replicationConfig.getRequiredNodes(); List healthyDNs = pickAllNodesNotUsed(replicationConfig); List healthyDNsWithSpace = healthyDNs.stream() .filter(dn -> SCMCommonPlacementPolicy - .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired)) + .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired, conf)) .limit(nodesRequired) .collect(Collectors.toList()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 1b62120c1ee7..8336bce5eae7 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -163,7 +163,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, switch (factor) { case ONE: dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes, - containerSizeBytes); + containerSizeBytes, conf); break; case THREE: List excludeDueToEngagement = filterPipelineEngagement(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index ffefc7c5f5db..5c84dfa426cf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -23,31 +23,44 @@ import com.google.common.collect.Sets; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; -import org.mockito.Mockito; - import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + import java.util.function.Function; import java.util.stream.Stream; @@ -448,11 +461,68 @@ protected List chooseDatanodesInternal( } }; dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1); - Assertions.assertFalse(usedNodesIdentity.get()); + assertFalse(usedNodesIdentity.get()); dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1); Assertions.assertTrue(usedNodesIdentity.get()); } + @Test + public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { + NodeManager nodeMngr = mock(NodeManager.class); + ConfigurationSource confing = mock(ConfigurationSource.class); + when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE))) + .thenReturn(true); + when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE), + eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT), + eq(StorageUnit.BYTES))).thenReturn(100000.0); + UUID datanodeUuid = spy(UUID.randomUUID()); + DummyPlacementPolicy placementPolicy = + new DummyPlacementPolicy(nodeMngr, confing, 1); + DatanodeDetails datanodeDetails = 
mock(DatanodeDetails.class); + when(datanodeDetails.getUuid()).thenReturn(datanodeUuid); + + DatanodeInfo datanodeInfo = mock(DatanodeInfo.class); + NodeStatus nodeStatus = mock(NodeStatus.class); + when(nodeStatus.isNodeWritable()).thenReturn(true); + when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus); + when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo); + + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = + StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() + .setCommitted(500) + .setCapacity(200000) + .setRemaining(101000) + .setStorageUuid(UUID.randomUUID().toString()) + .setStorageLocation("/data/hdds") + .build(); + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = + StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() + .setCommitted(1000) + .setCapacity(200000) + .setRemaining(101000) + .setStorageUuid(UUID.randomUUID().toString()) + .setStorageLocation("/data/hdds") + .build(); + StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto + metaReport = + StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto + .newBuilder() + .setRemaining(200) + .setStorageLocation("/data/metadata") + .build(); + when(datanodeInfo.getStorageReports()) + .thenReturn(Collections.singletonList(storageReport1)) + .thenReturn(Collections.singletonList(storageReport2)); + when(datanodeInfo.getMetadataStorageReports()) + .thenReturn(Collections.singletonList(metaReport)); + + + // 500 committed bytes + assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + // 1000 committed bytes + assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + } + private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { private Map rackMap; private List racks; @@ -485,7 +555,7 @@ private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { super(nodeManager, conf); this.rackCnt = rackCnt; this.racks = 
IntStream.range(0, rackCnt) - .mapToObj(i -> Mockito.mock(Node.class)).collect(Collectors.toList()); + .mapToObj(i -> mock(Node.class)).collect(Collectors.toList()); List datanodeDetails = nodeManager.getAllNodes(); rackMap = datanodeRackMap.entrySet().stream() .collect(Collectors.toMap( From 9d8c56d88bf95545895f9e4953211ca78a2a7ac5 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Fri, 8 Dec 2023 13:50:20 +0300 Subject: [PATCH 4/9] HDDS-9807. add committed storage bytes count to the recon datanode report and datanode usageinfo --- .../static/swagger-resources/recon-api.yaml | 6 ++++ .../src/main/proto/hdds.proto | 1 + .../balancer/ContainerBalancerTask.java | 2 +- .../container/placement/metrics/NodeStat.java | 8 ++++- .../placement/metrics/SCMNodeMetric.java | 15 ++++++---- .../placement/metrics/SCMNodeStat.java | 30 +++++++++++++++---- .../hdds/scm/node/DatanodeUsageInfo.java | 1 + .../hadoop/hdds/scm/node/SCMNodeManager.java | 8 +++-- .../hdds/scm/container/MockNodeManager.java | 2 +- .../balancer/TestContainerBalancerTask.java | 2 +- .../balancer/TestFindTargetStrategy.java | 22 +++++++------- .../TestSCMContainerPlacementCapacity.java | 8 ++--- .../placement/TestDatanodeMetrics.java | 8 ++--- .../scm/cli/datanode/UsageInfoSubcommand.java | 9 ++++++ .../ozone/recon/api/ClusterStateEndpoint.java | 3 +- .../hadoop/ozone/recon/api/NodeEndpoint.java | 3 +- .../api/types/DatanodeStorageReport.java | 9 +++++- .../webapps/recon/ozone-recon-web/api/db.json | 3 +- .../src/components/storageBar/storageBar.less | 5 ++++ .../src/components/storageBar/storageBar.tsx | 5 +++- .../src/types/datanode.types.tsx | 1 + .../src/views/datanodes/datanodes.tsx | 4 ++- .../api/TestNSSummaryEndpointWithFSO.java | 2 +- .../api/TestNSSummaryEndpointWithLegacy.java | 2 +- 24 files changed, 114 insertions(+), 45 deletions(-) diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml 
index 3b41132f5f57..9ff328776657 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml +++ b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml @@ -1433,6 +1433,9 @@ components: remaining: type: number example: 1080410456064 + committed: + type: number + example: 1080410456 containers: type: integer example: 26 @@ -1480,6 +1483,9 @@ components: remaining: type: number example: 270071111680 + committed: + type: number + example: 27007111 pipelines: type: array items: diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 5c20745c061e..b65355eeb5c2 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -187,6 +187,7 @@ message DatanodeUsageInfoProto { optional int64 remaining = 3; optional DatanodeDetailsProto node = 4; optional int64 containerCount = 5; + optional int64 committed = 6; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 6541d75d2793..c541c5d6c941 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -932,7 +932,7 @@ private long ratioToBytes(Long nodeCapacity, double utilizationRatio) { return 0; } SCMNodeStat aggregatedStats = new SCMNodeStat( - 0, 0, 0); + 0, 0, 0, 0); for (DatanodeUsageInfo node : nodes) { aggregatedStats.add(node.getScmNodeStat()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java index d6857d395cfb..b963e8f0745e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java @@ -42,6 +42,12 @@ interface NodeStat { */ LongMetric getRemaining(); + /** + * Get the committed space of the node. + * @return the committed space of the node + */ + LongMetric getCommitted(); + /** * Set the total/used/remaining space. * @param capacity - total space. @@ -49,7 +55,7 @@ interface NodeStat { * @param remain - remaining space. */ @VisibleForTesting - void set(long capacity, long used, long remain); + void set(long capacity, long used, long remain, long committed); /** * Adding of the stat. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 2f5c6f33f73e..a86c40cf4a1f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -36,16 +36,19 @@ public SCMNodeMetric(SCMNodeStat stat) { } /** - * Set the capacity, used and remaining space on a datanode. + * Set the capacity, used, remaining and committed space on a datanode. 
* - * @param capacity in bytes - * @param used in bytes + * @param capacity in bytes + * @param used in bytes * @param remaining in bytes + * @param committed + * in bytes */ @VisibleForTesting - public SCMNodeMetric(long capacity, long used, long remaining) { + public SCMNodeMetric(long capacity, long used, long remaining, + long committed) { this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining); + this.stat.set(capacity, used, remaining, committed); } /** @@ -156,7 +159,7 @@ public SCMNodeStat get() { @Override public void set(SCMNodeStat value) { stat.set(value.getCapacity().get(), value.getScmUsed().get(), - value.getRemaining().get()); + value.getRemaining().get(), value.getCommitted().get()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 962bbb464ecc..712b6d198593 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -28,16 +28,18 @@ public class SCMNodeStat implements NodeStat { private LongMetric capacity; private LongMetric scmUsed; private LongMetric remaining; + private LongMetric committed; public SCMNodeStat() { - this(0L, 0L, 0L); + this(0L, 0L, 0L, 0L); } public SCMNodeStat(SCMNodeStat other) { - this(other.capacity.get(), other.scmUsed.get(), other.remaining.get()); + this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(), + other.committed.get()); } - public SCMNodeStat(long capacity, long used, long remaining) { + public SCMNodeStat(long capacity, long used, long remaining, long committed) { Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(used >= 0, "used 
space cannot be " + @@ -47,6 +49,7 @@ public SCMNodeStat(long capacity, long used, long remaining) { this.capacity = new LongMetric(capacity); this.scmUsed = new LongMetric(used); this.remaining = new LongMetric(remaining); + this.committed = new LongMetric(committed); } /** @@ -73,6 +76,15 @@ public LongMetric getRemaining() { return remaining; } + /** + * + * @return the total committed space on the node + */ + @Override + public LongMetric getCommitted() { + return committed; + } + /** * Set the capacity, used and remaining space on a datanode. * @@ -82,7 +94,8 @@ public LongMetric getRemaining() { */ @Override @VisibleForTesting - public void set(long newCapacity, long newUsed, long newRemaining) { + public void set(long newCapacity, long newUsed, long newRemaining, + long newCommitted) { Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + @@ -93,6 +106,7 @@ public void set(long newCapacity, long newUsed, long newRemaining) { this.capacity = new LongMetric(newCapacity); this.scmUsed = new LongMetric(newUsed); this.remaining = new LongMetric(newRemaining); + this.committed = new LongMetric(newCommitted); } /** @@ -106,6 +120,7 @@ public SCMNodeStat add(NodeStat stat) { this.capacity.set(this.getCapacity().get() + stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); + this.committed.set(this.getCommitted().get() + stat.getCommitted().get()); return this; } @@ -120,6 +135,7 @@ public SCMNodeStat subtract(NodeStat stat) { this.capacity.set(this.getCapacity().get() - stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); + this.committed.set(this.getCommitted().get() - stat.getCommitted().get()); return this; } @@ -129,13 +145,15 @@ 
public boolean equals(Object to) { SCMNodeStat tempStat = (SCMNodeStat) to; return capacity.isEqual(tempStat.getCapacity().get()) && scmUsed.isEqual(tempStat.getScmUsed().get()) && - remaining.isEqual(tempStat.getRemaining().get()); + remaining.isEqual(tempStat.getRemaining().get()) && + committed.isEqual(tempStat.getCommitted().get()); } return false; } @Override public int hashCode() { - return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get()); + return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^ + committed.get()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 14353cfa7e37..331b32e732f2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -205,6 +205,7 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { builder.setCapacity(scmNodeStat.getCapacity().get()); builder.setUsed(scmNodeStat.getScmUsed().get()); builder.setRemaining(scmNodeStat.getRemaining().get()); + builder.setCommitted(scmNodeStat.getCommitted().get()); } builder.setContainerCount(containerCount); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 167b25afd01c..a0ce8e572648 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -855,13 +855,15 @@ public SCMNodeStat getStats() { long capacity = 0L; long used = 0L; long remaining = 0L; + long committed = 0L; for (SCMNodeStat stat : getNodeStats().values()) { capacity += 
stat.getCapacity().get(); used += stat.getScmUsed().get(); remaining += stat.getRemaining().get(); + committed += stat.getCommitted().get(); } - return new SCMNodeStat(capacity, used, remaining); + return new SCMNodeStat(capacity, used, remaining, committed); } /** @@ -966,6 +968,7 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { long capacity = 0L; long used = 0L; long remaining = 0L; + long committed = 0L; final DatanodeInfo datanodeInfo = nodeStateManager .getNode(datanodeDetails); @@ -975,8 +978,9 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { capacity += reportProto.getCapacity(); used += reportProto.getScmUsed(); remaining += reportProto.getRemaining(); + committed += reportProto.getCommitted(); } - return new SCMNodeStat(capacity, used, remaining); + return new SCMNodeStat(capacity, used, remaining, committed); } catch (NodeNotFoundException e) { LOG.warn("Cannot generate NodeStat, datanode {} not found.", datanodeDetails.getUuidString()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 98638ebe009d..e40aeb2cba81 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -227,7 +227,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; newStat.set( (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining); + (NODES[x % NODES.length].used), remaining, 0); this.nodeMetricMap.put(datanodeDetails, newStat); aggregateStat.add(newStat); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 4bc3cf43cf6e..1c0693cf9081 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -1207,7 +1207,7 @@ private double createCluster() { datanodeCapacity = (long) (datanodeUsedSpace / nodeUtilizations.get(i)); } SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, - datanodeCapacity - datanodeUsedSpace); + datanodeCapacity - datanodeUsedSpace, 0); nodesInCluster.get(i).setScmNodeStat(stat); clusterUsedSpace += datanodeUsedSpace; clusterCapacity += datanodeCapacity; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java index 7e734042d883..b11f9bfd10c5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java @@ -56,11 +56,11 @@ public void testFindTargetGreedyByUsage() { //create three datanodes with different usageinfo DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0)); //insert in 
ascending order overUtilizedDatanodes.add(dui1); @@ -98,11 +98,11 @@ public void testFindTargetGreedyByUsage() { public void testResetPotentialTargets() { // create three datanodes with different usage infos DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70)); + .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80)); + .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90)); + .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0)); List potentialTargets = new ArrayList<>(); potentialTargets.add(dui1); @@ -179,18 +179,18 @@ public void testFindTargetGreedyByNetworkTopology() { List overUtilizedDatanodes = new ArrayList<>(); //set the farthest target with the lowest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90))); + new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0))); //set the tree targets, which have the same network topology distance //to source , with different usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20))); + new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40))); + new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60))); + new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0))); //set the nearest target with the highest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10))); + new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0))); 
FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 910fe75ede6c..953fe2950ec9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -103,13 +103,13 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new ArrayList<>(datanodes)); when(mockNodeManager.getNodeStat(any())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); + .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0)); when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); + .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0)); when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L)); + .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0)); when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L)); + .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0)); when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer( invocation -> datanodes.stream() .filter(dn -> dn.getUuid().equals(invocation.getArgument(0))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index 6ba2fc440a4f..3600d92c4b98 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ -31,13 +31,13 @@ public class TestDatanodeMetrics { @Test public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L); + SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0); assertEquals((long) stat.getCapacity().get(), 100L); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric metric = new SCMNodeMetric(stat); - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L); + SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0); assertEquals(100L, (long) stat.getCapacity().get()); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); @@ -53,8 +53,8 @@ public void testSCMNodeMetric() { assertTrue(metric.isGreater(zeroMetric.get())); // Another case when nodes have similar weight - SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L); - SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L); + SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0); + SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0); assertTrue(new SCMNodeMetric(stat2).isGreater(stat1)); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index d46513b24bbd..87beef0d5204 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,6 +155,8 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); + System.out.printf("%-13s: %s (%s) %n", "Committed", 
info.getCommitted() + + " B", StringUtils.byteDesc(info.getCommitted())); System.out.printf("%-13s: %d %n%n", "Container(s)", info.getContainerCount()); } @@ -181,6 +183,7 @@ private static class DatanodeUsage { private long capacity = 0; private long used = 0; private long remaining = 0; + private long committed = 0; private long containerCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { @@ -196,6 +199,9 @@ private static class DatanodeUsage { if (proto.hasRemaining()) { remaining = proto.getRemaining(); } + if (proto.hasCommitted()) { + committed = proto.getCommitted(); + } if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } @@ -220,6 +226,9 @@ public long getOzoneUsed() { public long getRemaining() { return remaining; } + public long getCommitted() { + return committed; + } public long getContainerCount() { return containerCount; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java index bc87c402eb29..b074e5ba56a6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java @@ -120,7 +120,8 @@ public Response getClusterState() { SCMNodeStat stats = nodeManager.getStats(); DatanodeStorageReport storageReport = new DatanodeStorageReport(stats.getCapacity().get(), - stats.getScmUsed().get(), stats.getRemaining().get()); + stats.getScmUsed().get(), stats.getRemaining().get(), + stats.getCommitted().get()); ClusterStateResponse.Builder builder = ClusterStateResponse.newBuilder(); GlobalStats volumeRecord = globalStatsDao.findById( diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 
33df0ca1bd5f..968bfbc46343 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -168,6 +168,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { long capacity = nodeStat.getCapacity().get(); long used = nodeStat.getScmUsed().get(); long remaining = nodeStat.getRemaining().get(); - return new DatanodeStorageReport(capacity, used, remaining); + long committed = nodeStat.getCommitted().get(); + return new DatanodeStorageReport(capacity, used, remaining, committed); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java index d3fbb598c1b2..43a20317a29e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java @@ -24,11 +24,14 @@ public class DatanodeStorageReport { private long capacity; private long used; private long remaining; + private long committed; - public DatanodeStorageReport(long capacity, long used, long remaining) { + public DatanodeStorageReport(long capacity, long used, long remaining, + long committed) { this.capacity = capacity; this.used = used; this.remaining = remaining; + this.committed = committed; } public long getCapacity() { @@ -42,4 +45,8 @@ public long getUsed() { public long getRemaining() { return remaining; } + + public long getCommitted() { + return committed; + } } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 60362299fa55..eeab2408329f 100644 --- 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -29,7 +29,8 @@ "storageReport": { "capacity": 62725623808, "used": 488288256, - "remaining": 21005319168 + "remaining": 21005319168, + "committed": 10240000 }, "pipelines": [ { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less index b2dddbcaa37d..ecba534cc08c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less @@ -19,6 +19,7 @@ @progress-gray: #d0d0d0; @progress-blue: #1890ff; @progress-green: #52c41a; +@progress-dark-grey: #424242; .storage-cell-container { position: relative; @@ -45,3 +46,7 @@ .remaining-bg { color: @progress-gray; } + +.committed-bg { + color: @progress-dark-grey; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx index 10decce103fa..780874b8763f 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx @@ -32,6 +32,7 @@ interface IStorageBarProps extends RouteComponentProps { total: number; used: number; remaining: number; + committed: number; showMeta?: boolean; } @@ -39,6 +40,7 @@ const defaultProps = { total: 0, used: 0, remaining: 0, + committed: 0, showMeta: true }; @@ -46,7 +48,7 @@ class StorageBar extends React.Component { static defaultProps = 
defaultProps; render() { - const {total, used, remaining, showMeta} = this.props; + const {total, used, remaining, committed, showMeta} = this.props; const nonOzoneUsed = total - remaining - used; const totalUsed = total - remaining; const tooltip = ( @@ -54,6 +56,7 @@ class StorageBar extends React.Component {
Ozone Used ({size(used)})
Non Ozone Used ({size(nonOzoneUsed)})
Remaining ({size(remaining)})
+
Committed ({size(committed)})
); const metaElement = showMeta ?
{size(used)} + {size(nonOzoneUsed)} / {size(total)}
: null; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx index 8f92742916f3..d69466ac0fea 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx @@ -30,4 +30,5 @@ export interface IStorageReport { capacity: number; used: number; remaining: number; + committed: number; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index e418bf2fefd0..4d80bc7d2dda 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -69,6 +69,7 @@ interface IDatanode { storageUsed: number; storageTotal: number; storageRemaining: number; + storageCommitted: number; pipelines: IPipeline[]; containers: number; openContainers: number; @@ -173,7 +174,7 @@ const COLUMNS = [ render: (text: string, record: IDatanode) => ( + remaining={record.storageRemaining} committed={record.storageCommitted}/> )}, { title: 'Last Heartbeat', @@ -358,6 +359,7 @@ export class Datanodes extends React.Component, IDatanode storageUsed: datanode.storageReport.used, storageTotal: datanode.storageReport.capacity, storageRemaining: datanode.storageReport.remaining, + storageCommitted: datanode.storageReport.committed, pipelines: datanode.pipelines, containers: datanode.containers, openContainers: datanode.openContainers, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index d3bee19ba6e9..c5581f676f5c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -1248,6 +1248,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE, 0); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index b324bd6b4276..ec781fbbf88d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -1286,6 +1286,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE, 0); } } From 5fe6cebfe734e379a4c5ebf04f7fbb485f02ef93 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Fri, 8 Dec 2023 16:48:05 +0300 Subject: [PATCH 5/9] HDDS-9807. 
add committed bytes count to volume info metrics --- .../ozone/container/common/volume/VolumeInfoMetrics.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index c90dcea81ff2..18e7354ec1da 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -142,4 +142,9 @@ public long getTotalCapacity() { return (getUsed() + getAvailable() + getReserved()); } + @Metric("Returns the Committed bytes of the Volume") + public long getCommitted() { + return volume.getCommittedBytes(); + } + } From 59ece8dc0c04527186ef1c77e191c70c68b69e7c Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Sun, 10 Dec 2023 09:49:24 +0300 Subject: [PATCH 6/9] HDDS-9807. 
provide datanode freeSpaceToSpare through heartbeat request, tiny changes in test to check datanode availability depending on committed bytes count --- .../common/impl/StorageLocationReport.java | 35 ++++++++++-- .../StorageLocationReportMXBean.java | 2 + .../common/volume/AvailableSpaceFilter.java | 19 ++----- .../container/common/volume/VolumeUsage.java | 8 +++ .../ScmServerDatanodeHeartbeatProtocol.proto | 1 + .../hdds/scm/SCMCommonPlacementPolicy.java | 9 ++-- .../scm/TestSCMCommonPlacementPolicy.java | 54 ++++++++++--------- .../scm/cli/datanode/UsageInfoSubcommand.java | 7 ++- .../webapps/recon/ozone-recon-web/api/db.json | 54 ++++++++++++------- .../src/components/storageBar/storageBar.tsx | 2 +- 10 files changed, 122 insertions(+), 69 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 8d707c858904..f31d45a7782b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.impl; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto. StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto. 
@@ -27,6 +28,7 @@ StorageContainerDatanodeProtocolProtos.StorageTypeProto; import org.apache.hadoop.ozone.container.common.interfaces .StorageLocationReportMXBean; +import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import java.io.IOException; @@ -43,19 +45,21 @@ public final class StorageLocationReport implements private final long scmUsed; private final long remaining; private final long committed; + private final long freeSpaceToSpare; private final StorageType storageType; private final String storageLocation; @SuppressWarnings("checkstyle:parameternumber") private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, long committed, StorageType storageType, - String storageLocation) { + long scmUsed, long remaining, long committed, long freeSpaceToSpare, + StorageType storageType, String storageLocation) { this.id = id; this.failed = failed; this.capacity = capacity; this.scmUsed = scmUsed; this.remaining = remaining; this.committed = committed; + this.freeSpaceToSpare = freeSpaceToSpare; this.storageType = storageType; this.storageLocation = storageLocation; } @@ -90,6 +94,11 @@ public long getCommitted() { return committed; } + @Override + public long getFreeSpaceToSpare() { + return freeSpaceToSpare; + } + @Override public String getStorageLocation() { return storageLocation; @@ -165,6 +174,11 @@ private static StorageType getStorageType(StorageTypeProto proto) throws * @throws IOException In case, the storage type specified is invalid. 
*/ public StorageReportProto getProtoBufMessage() throws IOException { + return getProtoBufMessage(null); + } + + public StorageReportProto getProtoBufMessage(ConfigurationSource conf) + throws IOException { StorageReportProto.Builder srb = StorageReportProto.newBuilder(); return srb.setStorageUuid(getId()) .setCapacity(getCapacity()) @@ -174,6 +188,8 @@ public StorageReportProto getProtoBufMessage() throws IOException { .setStorageType(getStorageTypeProto()) .setStorageLocation(getStorageLocation()) .setFailed(isFailed()) + .setFreeSpaceToSpare(conf != null ? + VolumeUsage.getMinVolumeFreeSpace(conf, getCapacity()) : 0) .build(); } @@ -276,6 +292,7 @@ public static class Builder { private long scmUsed; private long remaining; private long committed; + private long freeSpaceToSpare; private StorageType storageType; private String storageLocation; @@ -355,6 +372,18 @@ public Builder setCommitted(long committed) { return this; } + /** + * Sets the free space available to spare. + * (depends on datanode volume config, + * consider 'hdds.datanode.volume.min.*' configuration properties) + * @param freeSpaceToSpare the size of free volume space available to spare + * @return StorageLocationReport.Builder + */ + public Builder setFreeSpaceToSpare(long freeSpaceToSpare) { + this.freeSpaceToSpare = freeSpaceToSpare; + return this; + } + /** * Sets the storageLocation. 
* @@ -373,7 +402,7 @@ public Builder setStorageLocation(String storageLocationValue) { */ public StorageLocationReport build() { return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, committed, storageType, storageLocation); + remaining, committed, freeSpaceToSpare, storageType, storageLocation); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java index 707593bb7d89..74c4336bc652 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java @@ -35,6 +35,8 @@ public interface StorageLocationReportMXBean { long getCommitted(); + long getFreeSpaceToSpare(); + String getStorageLocation(); String getStorageTypeName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 3ee33b75ec98..72703231e7f8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.ozone.container.common.volume; -import org.apache.hadoop.hdds.conf.ConfigurationSource; - import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; @@ -44,8 +42,10 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - 
committed; - boolean hasEnoughSpace = hasVolumeEnoughSpace(volumeCapacity, free, - committed, requiredSpace, vol.getConf()); + long volumeFreeSpaceTpSpare = + VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); + boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed, + requiredSpace, volumeFreeSpaceTpSpare); mostAvailableSpace = Math.max(available, mostAvailableSpace); @@ -70,17 +70,6 @@ public String toString() { ", volumes: " + fullVolumes; } - public static boolean hasVolumeEnoughSpace(long volumeCapacity, - long volumeAvailableSpace, - long volumeCommittedBytesCount, - long requiredSpace, - ConfigurationSource conf) { - long volumeFreeSpace = - VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity); - return (volumeAvailableSpace - volumeCommittedBytesCount) > - Math.max(requiredSpace, volumeFreeSpace); - } - private static class AvailableSpace { private final long free; private final long committed; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index e7a06abc9e36..57cf0a8b9dd0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -162,6 +162,14 @@ public static long getMinVolumeFreeSpace(ConfigurationSource conf, } + public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace, + long volumeCommittedBytesCount, + long requiredSpace, + long volumeFreeSpaceToSpare) { + return (volumeAvailableSpace - volumeCommittedBytesCount) > + Math.max(requiredSpace, volumeFreeSpaceToSpare); + } + /** * Class representing precomputed space values of a volume. 
* This class is intended to store precomputed values, such as capacity diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index 973ab736a68d..2994073c0240 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -180,6 +180,7 @@ message StorageReportProto { optional StorageTypeProto storageType = 6 [default = DISK]; optional bool failed = 7 [default = false]; optional uint64 committed = 8 [default = 0]; + optional uint64 freeSpaceToSpare = 9 [default = 0]; } message MetadataStorageReportProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 906ebf324967..78b1a031b8e8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -51,8 +52,6 @@ import java.util.function.Function; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.container.common.volume.AvailableSpaceFilter.hasVolumeEnoughSpace; - /** * This policy implements a set of invariants which are common * for all basic placement policies, acts as the repository of helper @@ -312,9 +311,9 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, if (dataSizeRequired > 0) { for (StorageReportProto reportProto 
: datanodeInfo.getStorageReports()) { - if (hasVolumeEnoughSpace(reportProto.getCapacity(), - reportProto.getRemaining(), reportProto.getCommitted(), - dataSizeRequired, conf)) { + if (VolumeUsage.hasVolumeEnoughSpace(reportProto.getRemaining(), + reportProto.getCommitted(), dataSizeRequired, + reportProto.getFreeSpaceToSpare())) { enoughForData = true; break; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index 5c84dfa426cf..15dc0041f686 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -54,11 +54,11 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import java.util.function.Function; @@ -467,7 +467,7 @@ protected List chooseDatanodesInternal( } @Test - public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { + public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() { NodeManager nodeMngr = mock(NodeManager.class); ConfigurationSource confing = mock(ConfigurationSource.class); when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE))) @@ -475,7 
+475,7 @@ public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE), eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT), eq(StorageUnit.BYTES))).thenReturn(100000.0); - UUID datanodeUuid = spy(UUID.randomUUID()); + UUID datanodeUuid = UUID.randomUUID(); DummyPlacementPolicy placementPolicy = new DummyPlacementPolicy(nodeMngr, confing, 1); DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); @@ -487,29 +487,19 @@ public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus); when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo); + // capacity = 200000, used = 90000, remaining = 101000, committed = 500 StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = - StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() - .setCommitted(500) - .setCapacity(200000) - .setRemaining(101000) - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation("/data/hdds") - .build(); + HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", + 200000, 90000, 101000, DISK).toBuilder() + .setCommitted(500).build(); + // capacity = 200000, used = 90000, remaining = 101000, committed = 1000 StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = - StorageContainerDatanodeProtocolProtos.StorageReportProto.newBuilder() - .setCommitted(1000) - .setCapacity(200000) - .setRemaining(101000) - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation("/data/hdds") - .build(); + HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", + 200000, 90000, 101000, DISK).toBuilder() + .setCommitted(1000).build(); StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto - metaReport = - StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto - .newBuilder() - .setRemaining(200) - .setStorageLocation("/data/metadata") - 
.build(); + metaReport = HddsTestUtils.createMetadataStorageReport("/data/metadata", + 200); when(datanodeInfo.getStorageReports()) .thenReturn(Collections.singletonList(storageReport1)) .thenReturn(Collections.singletonList(storageReport2)); @@ -517,9 +507,23 @@ public void testDatanodeIsInvalidIsCaseOfIncreasingCommittedBytes() { .thenReturn(Collections.singletonList(metaReport)); - // 500 committed bytes + // 500 committed bytes: + // + // 101000 500 + // | | + // (remaining - committed) > Math.max(4000, + // VolumeUsage.getMinVolumeFreeSpace(conf,volumeCapacity)) + // | + // 200000 + // + // VolumeUsage.getMinVolumeFreeSpace(conf,volumeCapacity) == 100000 + // (take a look to ConfigurationSource mock above) + // + // Summary: 101000 - 500 > 100000 == true assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); - // 1000 committed bytes + + // 1000 committed bytes: + // Summary: 101000 - 1000 > 100000 == false assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index 87beef0d5204..9b8998db4f7c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,8 +155,11 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); - System.out.printf("%-13s: %s (%s) %n", "Committed", info.getCommitted() - + " B", StringUtils.byteDesc(info.getCommitted())); + System.out.printf("%-13s: %s (%s) %n", "Container Pre-allocated", + info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted())); + System.out.printf("%-13s: %s (%s) %n", 
"Remaining Allocatable", + (info.getRemaining() - info.getCommitted()) + " B", + StringUtils.byteDesc((info.getRemaining() - info.getCommitted()))); System.out.printf("%-13s: %d %n%n", "Container(s)", info.getContainerCount()); } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index eeab2408329f..204609f66fec 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -63,7 +63,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 34563456 }, "pipelines": [ { @@ -96,7 +97,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 34562 }, "pipelines": [ { @@ -129,7 +131,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 4576435 }, "pipelines": [ { @@ -162,7 +165,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 3453121 }, "pipelines": [ { @@ -195,7 +199,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 3457623435 }, "pipelines": [ { @@ -234,7 +239,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 345624 }, "pipelines": [ { @@ -273,7 +279,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 123464574 }, "pipelines": [ { @@ -312,7 +319,8 @@ "storageReport": { "capacity": 
140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 556721345 }, "pipelines": [ { @@ -351,7 +359,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 45671235234 }, "pipelines": [ { @@ -390,7 +399,8 @@ "storageReport": { "capacity": 140737488355328, "used": 0, - "remaining": 110737488355328 + "remaining": 110737488355328, + "committed": 0 }, "pipelines": [], "containers": 0, @@ -410,7 +420,8 @@ "storageReport": { "capacity": 805306368000, "used": 644245094400, - "remaining": 121061273600 + "remaining": 121061273600, + "committed": 4572345234 }, "pipelines": [ { @@ -443,7 +454,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 92757023244288 + "remaining": 92757023244288, + "committed": 34563453 }, "pipelines": [ { @@ -476,7 +488,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 94784247808 + "remaining": 94784247808, + "committed": 7234234 }, "pipelines": [ { @@ -515,7 +528,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 92757023244288 + "remaining": 92757023244288, + "committed": 34562346 }, "pipelines": [ { @@ -548,7 +562,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 76757023244288 + "remaining": 76757023244288, + "committed": 834324523 }, "pipelines": [ { @@ -581,7 +596,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 66757023244288 + "remaining": 66757023244288, + "committed": 346467345 }, "pipelines": [ { @@ -620,7 +636,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 96157023244288 + "remaining": 96157023244288, + "committed": 45245456 }, "pipelines": [ { @@ -653,7 +670,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - 
"remaining": 94757023244288 + "remaining": 94757023244288, + "committed": 45673234 }, "pipelines": [ { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx index 780874b8763f..9263c6817beb 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx @@ -56,7 +56,7 @@ class StorageBar extends React.Component {
Ozone Used ({size(used)})
Non Ozone Used ({size(nonOzoneUsed)})
Remaining ({size(remaining)})
-
Committed ({size(committed)})
+
Container Pre-allocated ({size(committed)})
); const metaElement = showMeta ?
{size(used)} + {size(nonOzoneUsed)} / {size(total)}
: null; From 58b6abab5193477a2e9371894be4db6c20e81885 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Sun, 10 Dec 2023 11:43:00 +0300 Subject: [PATCH 7/9] HDDS-9807. fix test to check datanode availability (configuration is not relevant anymore) --- .../scm/TestSCMCommonPlacementPolicy.java | 27 +++++++------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index 15dc0041f686..87497a9f0709 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -23,7 +23,6 @@ import com.google.common.collect.Sets; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -51,8 +50,6 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -469,15 +466,9 @@ protected List chooseDatanodesInternal( @Test public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() { NodeManager nodeMngr = 
mock(NodeManager.class); - ConfigurationSource confing = mock(ConfigurationSource.class); - when(confing.isConfigured(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE))) - .thenReturn(true); - when(confing.getStorageSize(eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE), - eq(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT), - eq(StorageUnit.BYTES))).thenReturn(100000.0); UUID datanodeUuid = UUID.randomUUID(); DummyPlacementPolicy placementPolicy = - new DummyPlacementPolicy(nodeMngr, confing, 1); + new DummyPlacementPolicy(nodeMngr, conf, 1); DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); when(datanodeDetails.getUuid()).thenReturn(datanodeUuid); @@ -491,12 +482,16 @@ public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() { StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", 200000, 90000, 101000, DISK).toBuilder() - .setCommitted(500).build(); + .setCommitted(500) + .setFreeSpaceToSpare(10000) + .build(); // capacity = 200000, used = 90000, remaining = 101000, committed = 1000 StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", 200000, 90000, 101000, DISK).toBuilder() - .setCommitted(1000).build(); + .setCommitted(1000) + .setFreeSpaceToSpare(100000) + .build(); StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto metaReport = HddsTestUtils.createMetadataStorageReport("/data/metadata", 200); @@ -511,13 +506,9 @@ public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() { // // 101000 500 // | | - // (remaining - committed) > Math.max(4000, - // VolumeUsage.getMinVolumeFreeSpace(conf,volumeCapacity)) + // (remaining - committed) > Math.max(4000, freeSpaceToSpare) // | - // 200000 - // - // VolumeUsage.getMinVolumeFreeSpace(conf,volumeCapacity) == 100000 - // (take a look to ConfigurationSource mock above) + // 100000 // // Summary: 101000 - 500 > 100000 == true 
assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); From 873aeaf5c65e94ccb90d28d30e76f42c1cfd8f57 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 11 Dec 2023 11:25:17 +0300 Subject: [PATCH 8/9] HDDS-9807. add FreeSpaceToSpare datanode volume metric to the datanode usageinfo command output --- .../common/volume/AvailableSpaceFilter.java | 4 +-- .../container/ozoneimpl/OzoneContainer.java | 2 +- .../src/main/proto/hdds.proto | 1 + .../balancer/ContainerBalancerTask.java | 2 +- .../container/placement/metrics/NodeStat.java | 9 +++++- .../placement/metrics/SCMNodeMetric.java | 7 +++-- .../placement/metrics/SCMNodeStat.java | 30 +++++++++++++++---- .../hdds/scm/node/DatanodeUsageInfo.java | 1 + .../hadoop/hdds/scm/node/SCMNodeManager.java | 10 +++++-- .../hdds/scm/container/MockNodeManager.java | 2 +- .../balancer/TestContainerBalancerTask.java | 3 +- .../balancer/TestFindTargetStrategy.java | 22 +++++++------- .../TestSCMContainerPlacementCapacity.java | 8 ++--- .../placement/TestDatanodeMetrics.java | 8 ++--- .../scm/cli/datanode/UsageInfoSubcommand.java | 10 +++++++ .../api/TestNSSummaryEndpointWithFSO.java | 2 +- .../api/TestNSSummaryEndpointWithLegacy.java | 2 +- 17 files changed, 84 insertions(+), 39 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 72703231e7f8..622c85a52fa0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -42,10 +42,10 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - committed; - long volumeFreeSpaceTpSpare = + 
long volumeFreeSpaceToSpare = VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed, - requiredSpace, volumeFreeSpaceTpSpare); + requiredSpace, volumeFreeSpaceToSpare); mostAvailableSpace = Math.max(available, mostAvailableSpace); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 1e34fb104939..277ab4464e30 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -524,7 +524,7 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() = StorageContainerDatanodeProtocolProtos. NodeReportProto.newBuilder(); for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); + nrb.addStorageReport(reports[i].getProtoBufMessage(config)); } StorageLocationReport[] metaReports = metaVolumeSet.getStorageReport(); diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index b65355eeb5c2..3f346300b3ed 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -188,6 +188,7 @@ message DatanodeUsageInfoProto { optional DatanodeDetailsProto node = 4; optional int64 containerCount = 5; optional int64 committed = 6; + optional int64 freeSpaceToSpare = 7; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index c541c5d6c941..abbc50ac86a5 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -932,7 +932,7 @@ private long ratioToBytes(Long nodeCapacity, double utilizationRatio) { return 0; } SCMNodeStat aggregatedStats = new SCMNodeStat( - 0, 0, 0, 0); + 0, 0, 0, 0, 0); for (DatanodeUsageInfo node : nodes) { aggregatedStats.add(node.getScmNodeStat()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java index b963e8f0745e..eedc89dfc585 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java @@ -48,6 +48,12 @@ interface NodeStat { */ LongMetric getCommitted(); + /** + * Get a min free space available to spare on the node. + * @return a min free space available to spare + */ + LongMetric getFreeSpaceToSpare(); + /** * Set the total/used/remaining space. * @param capacity - total space. @@ -55,7 +61,8 @@ interface NodeStat { * @param remain - remaining space. */ @VisibleForTesting - void set(long capacity, long used, long remain, long committed); + void set(long capacity, long used, long remain, long committed, + long freeSpaceToSpare); /** * Adding of the stat. 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index a86c40cf4a1f..330bf67416ae 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -46,9 +46,9 @@ public SCMNodeMetric(SCMNodeStat stat) { */ @VisibleForTesting public SCMNodeMetric(long capacity, long used, long remaining, - long committed) { + long committed, long freeSpaceToSpare) { this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining, committed); + this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare); } /** @@ -159,7 +159,8 @@ public SCMNodeStat get() { @Override public void set(SCMNodeStat value) { stat.set(value.getCapacity().get(), value.getScmUsed().get(), - value.getRemaining().get(), value.getCommitted().get()); + value.getRemaining().get(), value.getCommitted().get(), + value.getFreeSpaceToSpare().get()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 712b6d198593..2a848a04eff5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -29,17 +29,19 @@ public class SCMNodeStat implements NodeStat { private LongMetric scmUsed; private LongMetric remaining; private LongMetric committed; + private LongMetric freeSpaceToSpare; public SCMNodeStat() { - this(0L, 0L, 0L, 0L); + this(0L, 0L, 0L, 0L, 0L); } public 
SCMNodeStat(SCMNodeStat other) { this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(), - other.committed.get()); + other.committed.get(), other.freeSpaceToSpare.get()); } - public SCMNodeStat(long capacity, long used, long remaining, long committed) { + public SCMNodeStat(long capacity, long used, long remaining, long committed, + long freeSpaceToSpare) { Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(used >= 0, "used space cannot be " + @@ -50,6 +52,7 @@ public SCMNodeStat(long capacity, long used, long remaining, long committed) { this.scmUsed = new LongMetric(used); this.remaining = new LongMetric(remaining); this.committed = new LongMetric(committed); + this.freeSpaceToSpare = new LongMetric(freeSpaceToSpare); } /** @@ -85,6 +88,15 @@ public LongMetric getCommitted() { return committed; } + /** + * Get a min space available to spare on the node. + * @return a min free space available to spare on the node + */ + @Override + public LongMetric getFreeSpaceToSpare() { + return freeSpaceToSpare; + } + /** * Set the capacity, used and remaining space on a datanode. 
* @@ -95,7 +107,7 @@ public LongMetric getCommitted() { @Override @VisibleForTesting public void set(long newCapacity, long newUsed, long newRemaining, - long newCommitted) { + long newCommitted, long newFreeSpaceToSpare) { Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + @@ -107,6 +119,7 @@ public void set(long newCapacity, long newUsed, long newRemaining, this.scmUsed = new LongMetric(newUsed); this.remaining = new LongMetric(newRemaining); this.committed = new LongMetric(newCommitted); + this.freeSpaceToSpare = new LongMetric(newFreeSpaceToSpare); } /** @@ -121,6 +134,8 @@ public SCMNodeStat add(NodeStat stat) { this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); this.committed.set(this.getCommitted().get() + stat.getCommitted().get()); + this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() + + stat.getFreeSpaceToSpare().get()); return this; } @@ -136,6 +151,8 @@ public SCMNodeStat subtract(NodeStat stat) { this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); this.committed.set(this.getCommitted().get() - stat.getCommitted().get()); + this.freeSpaceToSpare.set(freeSpaceToSpare.get() - + stat.getFreeSpaceToSpare().get()); return this; } @@ -146,7 +163,8 @@ public boolean equals(Object to) { return capacity.isEqual(tempStat.getCapacity().get()) && scmUsed.isEqual(tempStat.getScmUsed().get()) && remaining.isEqual(tempStat.getRemaining().get()) && - committed.isEqual(tempStat.getCommitted().get()); + committed.isEqual(tempStat.getCommitted().get()) && + freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get()); } return false; } @@ -154,6 +172,6 @@ public boolean equals(Object to) { @Override public int hashCode() { return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() 
^ - committed.get()); + committed.get() ^ freeSpaceToSpare.get()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 331b32e732f2..4f7df4969063 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -206,6 +206,7 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { builder.setUsed(scmNodeStat.getScmUsed().get()); builder.setRemaining(scmNodeStat.getRemaining().get()); builder.setCommitted(scmNodeStat.getCommitted().get()); + builder.setFreeSpaceToSpare(scmNodeStat.getFreeSpaceToSpare().get()); } builder.setContainerCount(containerCount); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index a0ce8e572648..59ac09103250 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -856,14 +856,17 @@ public SCMNodeStat getStats() { long used = 0L; long remaining = 0L; long committed = 0L; + long freeSpaceToSpare = 0L; for (SCMNodeStat stat : getNodeStats().values()) { capacity += stat.getCapacity().get(); used += stat.getScmUsed().get(); remaining += stat.getRemaining().get(); committed += stat.getCommitted().get(); + freeSpaceToSpare += stat.getFreeSpaceToSpare().get(); } - return new SCMNodeStat(capacity, used, remaining, committed); + return new SCMNodeStat(capacity, used, remaining, committed, + freeSpaceToSpare); } /** @@ -969,6 +972,7 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { long used = 0L; long remaining = 0L; long 
committed = 0L; + long freeSpaceToSpare = 0L; final DatanodeInfo datanodeInfo = nodeStateManager .getNode(datanodeDetails); @@ -979,8 +983,10 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { used += reportProto.getScmUsed(); remaining += reportProto.getRemaining(); committed += reportProto.getCommitted(); + freeSpaceToSpare += reportProto.getFreeSpaceToSpare(); } - return new SCMNodeStat(capacity, used, remaining, committed); + return new SCMNodeStat(capacity, used, remaining, committed, + freeSpaceToSpare); } catch (NodeNotFoundException e) { LOG.warn("Cannot generate NodeStat, datanode {} not found.", datanodeDetails.getUuidString()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index e40aeb2cba81..794dedceef06 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -227,7 +227,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; newStat.set( (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining, 0); + (NODES[x % NODES.length].used), remaining, 0, 100000); this.nodeMetricMap.put(datanodeDetails, newStat); aggregateStat.add(newStat); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 1c0693cf9081..56d02dabb5fa 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -1207,7 +1207,8 @@ private double createCluster() { datanodeCapacity = (long) (datanodeUsedSpace / nodeUtilizations.get(i)); } SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, - datanodeCapacity - datanodeUsedSpace, 0); + datanodeCapacity - datanodeUsedSpace, 0, + datanodeCapacity - datanodeUsedSpace - 1); nodesInCluster.get(i).setScmNodeStat(stat); clusterUsedSpace += datanodeUsedSpace; clusterCapacity += datanodeCapacity; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java index b11f9bfd10c5..bb6f17bcc105 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java @@ -56,11 +56,11 @@ public void testFindTargetGreedyByUsage() { //create three datanodes with different usageinfo DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30)); //insert in ascending order overUtilizedDatanodes.add(dui1); @@ -98,11 +98,11 @@ public void testFindTargetGreedyByUsage() { public void testResetPotentialTargets() { // create three datanodes with different usage infos 
DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0)); + .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0)); + .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0)); + .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70)); List potentialTargets = new ArrayList<>(); potentialTargets.add(dui1); @@ -179,18 +179,18 @@ public void testFindTargetGreedyByNetworkTopology() { List overUtilizedDatanodes = new ArrayList<>(); //set the farthest target with the lowest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0))); + new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80))); //set the tree targets, which have the same network topology distance //to source , with different usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0))); + new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0))); + new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0))); + new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50))); //set the nearest target with the highest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0))); + new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5))); FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology = diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 953fe2950ec9..e51f9731ad4a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -103,13 +103,13 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new ArrayList<>(datanodes)); when(mockNodeManager.getNodeStat(any())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0)); + .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90)); when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0)); + .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9)); when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0)); + .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19)); when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0)); + .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20)); when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer( invocation -> datanodes.stream() .filter(dn -> dn.getUuid().equals(invocation.getArgument(0))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index 3600d92c4b98..9c9bfad582f7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ 
-31,13 +31,13 @@ public class TestDatanodeMetrics { @Test public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0); + SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80); assertEquals((long) stat.getCapacity().get(), 100L); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric metric = new SCMNodeMetric(stat); - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0); + SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80); assertEquals(100L, (long) stat.getCapacity().get()); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); @@ -53,8 +53,8 @@ public void testSCMNodeMetric() { assertTrue(metric.isGreater(zeroMetric.get())); // Another case when nodes have similar weight - SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0); - SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0); + SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000); + SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000); assertTrue(new SCMNodeMetric(stat2).isGreater(stat1)); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index 9b8998db4f7c..0780e2e21850 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -160,6 +160,9 @@ private void printInfo(DatanodeUsage info) { System.out.printf("%-13s: %s (%s) %n", "Remaining Allocatable", (info.getRemaining() - info.getCommitted()) + " B", StringUtils.byteDesc((info.getRemaining() - info.getCommitted()))); + System.out.printf("%-13s: %s (%s) %n", "Free Space To Spare", + info.getFreeSpaceToSpare() + " B", + 
StringUtils.byteDesc(info.getFreeSpaceToSpare())); System.out.printf("%-13s: %d %n%n", "Container(s)", info.getContainerCount()); } @@ -187,6 +190,7 @@ private static class DatanodeUsage { private long used = 0; private long remaining = 0; private long committed = 0; + private long freeSpaceToSpare = 0; private long containerCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { @@ -208,6 +212,9 @@ private static class DatanodeUsage { if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } + if (proto.hasFreeSpaceToSpare()) { + freeSpaceToSpare = proto.getFreeSpaceToSpare(); + } } public DatanodeDetails getDatanodeDetails() { @@ -232,6 +239,9 @@ public long getRemaining() { public long getCommitted() { return committed; } + public long getFreeSpaceToSpare() { + return freeSpaceToSpare; + } public long getContainerCount() { return containerCount; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index c5581f676f5c..cbe850b918f0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -1248,6 +1248,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE, 0); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index ec781fbbf88d..ba00f843f447 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -1286,6 +1286,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE, 0); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } From a6ff69abfdd6acdb9ef692477eab6762cd7ad5a9 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 11 Dec 2023 15:19:12 +0300 Subject: [PATCH 9/9] HDDS-9807. align datanode usageinfo sub-command output --- .../scm/cli/datanode/UsageInfoSubcommand.java | 10 +++--- .../cli/datanode/TestUsageInfoSubcommand.java | 33 +++++++++++++++++++ 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index 0780e2e21850..b967fa0658c0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,16 +155,16 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); - System.out.printf("%-13s: %s (%s) %n", "Container Pre-allocated", + System.out.printf("%-13s: %d %n", "Container(s)", + info.getContainerCount()); + System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated", info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted())); - System.out.printf("%-13s: %s (%s) %n", "Remaining Allocatable", + System.out.printf("%-24s: %s (%s) %n", "Remaining Allocatable", (info.getRemaining() - 
info.getCommitted()) + " B", StringUtils.byteDesc((info.getRemaining() - info.getCommitted()))); - System.out.printf("%-13s: %s (%s) %n", "Free Space To Spare", + System.out.printf("%-24s: %s (%s) %n%n", "Free Space To Spare", info.getFreeSpaceToSpare() + " B", StringUtils.byteDesc(info.getFreeSpaceToSpare())); - System.out.printf("%-13s: %d %n%n", "Container(s)", - info.getContainerCount()); } /** diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index 0cc8ed9be639..a52a0a7ed8f5 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -19,6 +19,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.codec.CharEncoding; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -97,6 +98,38 @@ public void testCorrectJsonValuesInReport() throws IOException { json.get(0).get("containerCount").longValue()); } + @Test + public void testOutputDataFieldsAligning() throws IOException { + // given + ScmClient scmClient = mock(ScmClient.class); + Mockito.when(scmClient.getDatanodeUsageInfo( + Mockito.anyBoolean(), Mockito.anyInt())) + .thenAnswer(invocation -> getUsageProto()); + + CommandLine c = new CommandLine(cmd); + c.parseArgs("-m"); + + // when + cmd.execute(scmClient); + + // then + String output = outContent.toString(CharEncoding.UTF_8); + Assertions.assertTrue(output.contains("UUID :")); + Assertions.assertTrue(output.contains("IP Address :")); + Assertions.assertTrue(output.contains("Hostname :")); + Assertions.assertTrue(output.contains("Capacity 
:")); + Assertions.assertTrue(output.contains("Total Used :")); + Assertions.assertTrue(output.contains("Total Used % :")); + Assertions.assertTrue(output.contains("Ozone Used :")); + Assertions.assertTrue(output.contains("Ozone Used % :")); + Assertions.assertTrue(output.contains("Remaining :")); + Assertions.assertTrue(output.contains("Remaining % :")); + Assertions.assertTrue(output.contains("Container(s) :")); + Assertions.assertTrue(output.contains("Container Pre-allocated :")); + Assertions.assertTrue(output.contains("Remaining Allocatable :")); + Assertions.assertTrue(output.contains("Free Space To Spare :")); + } + private List getUsageProto() { List result = new ArrayList<>(); result.add(HddsProtos.DatanodeUsageInfoProto.newBuilder()