diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 0222050da5e5..f31d45a7782b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.impl; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto. StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto. @@ -27,6 +28,7 @@ StorageContainerDatanodeProtocolProtos.StorageTypeProto; import org.apache.hadoop.ozone.container.common.interfaces .StorageLocationReportMXBean; +import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import java.io.IOException; @@ -42,17 +44,22 @@ public final class StorageLocationReport implements private final long capacity; private final long scmUsed; private final long remaining; + private final long committed; + private final long freeSpaceToSpare; private final StorageType storageType; private final String storageLocation; + @SuppressWarnings("checkstyle:parameternumber") private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, StorageType storageType, - String storageLocation) { + long scmUsed, long remaining, long committed, long freeSpaceToSpare, + StorageType storageType, String storageLocation) { this.id = id; this.failed = failed; this.capacity = capacity; this.scmUsed = scmUsed; this.remaining = remaining; + this.committed = committed; + this.freeSpaceToSpare = freeSpaceToSpare; this.storageType = storageType; 
this.storageLocation = storageLocation; } @@ -82,6 +89,16 @@ public long getRemaining() { return remaining; } + @Override + public long getCommitted() { + return committed; + } + + @Override + public long getFreeSpaceToSpare() { + return freeSpaceToSpare; + } + @Override public String getStorageLocation() { return storageLocation; @@ -157,14 +174,22 @@ private static StorageType getStorageType(StorageTypeProto proto) throws * @throws IOException In case, the storage type specified is invalid. */ public StorageReportProto getProtoBufMessage() throws IOException { + return getProtoBufMessage(null); + } + + public StorageReportProto getProtoBufMessage(ConfigurationSource conf) + throws IOException { StorageReportProto.Builder srb = StorageReportProto.newBuilder(); return srb.setStorageUuid(getId()) .setCapacity(getCapacity()) .setScmUsed(getScmUsed()) .setRemaining(getRemaining()) + .setCommitted(getCommitted()) .setStorageType(getStorageTypeProto()) .setStorageLocation(getStorageLocation()) .setFailed(isFailed()) + .setFreeSpaceToSpare(conf != null ? + VolumeUsage.getMinVolumeFreeSpace(conf, getCapacity()) : 0) .build(); } @@ -266,6 +291,8 @@ public static class Builder { private long capacity; private long scmUsed; private long remaining; + private long committed; + private long freeSpaceToSpare; private StorageType storageType; private String storageLocation; @@ -334,6 +361,29 @@ public Builder setStorageType(StorageType storageTypeValue) { return this; } + /** + * Sets the committed bytes count. + * (bytes for previously created containers) + * @param committed previously created containers size + * @return StorageLocationReport.Builder + */ + public Builder setCommitted(long committed) { + this.committed = committed; + return this; + } + + /** + * Sets the free space available to spare. 
+ * (depends on datanode volume config, + * consider 'hdds.datanode.volume.min.*' configuration properties) + * @param freeSpaceToSpare the size of free volume space available to spare + * @return StorageLocationReport.Builder + */ + public Builder setFreeSpaceToSpare(long freeSpaceToSpare) { + this.freeSpaceToSpare = freeSpaceToSpare; + return this; + } + /** * Sets the storageLocation. * @@ -352,7 +402,7 @@ public Builder setStorageLocation(String storageLocationValue) { */ public StorageLocationReport build() { return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, storageType, storageLocation); + remaining, committed, freeSpaceToSpare, storageType, storageLocation); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java index fd063678137d..74c4336bc652 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java @@ -33,6 +33,10 @@ public interface StorageLocationReportMXBean { long getRemaining(); + long getCommitted(); + + long getFreeSpaceToSpare(); + String getStorageLocation(); String getStorageTypeName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 13041eb4d662..622c85a52fa0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -25,7 +25,7 @@ * Filter for selecting volumes with enough space for a new container. * Keeps track of ineligible volumes for logging/debug purposes. */ -class AvailableSpaceFilter implements Predicate { +public class AvailableSpaceFilter implements Predicate { private final long requiredSpace; private final Map fullVolumes = @@ -42,10 +42,10 @@ public boolean test(HddsVolume vol) { long free = vol.getAvailable(); long committed = vol.getCommittedBytes(); long available = free - committed; - long volumeFreeSpace = + long volumeFreeSpaceToSpare = VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity); - boolean hasEnoughSpace = - available > Math.max(requiredSpace, volumeFreeSpace); + boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed, + requiredSpace, volumeFreeSpaceToSpare); mostAvailableSpace = Math.max(available, mostAvailableSpace); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 985ddea8deb8..3c0b6e618ee1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -464,6 +464,7 @@ public StorageLocationReport[] getStorageReport() { long scmUsed = 0; long remaining = 0; long capacity = 0; + long committed = 0; String rootDir = ""; failed = true; if (volumeInfo.isPresent()) { @@ -472,6 +473,8 @@ public StorageLocationReport[] getStorageReport() { scmUsed = volumeInfo.get().getScmUsed(); remaining = volumeInfo.get().getAvailable(); capacity = volumeInfo.get().getCapacity(); + committed = (volume instanceof HddsVolume) ? 
+ ((HddsVolume) volume).getCommittedBytes() : 0; failed = false; } catch (UncheckedIOException ex) { LOG.warn("Failed to get scmUsed and remaining for container " + @@ -491,6 +494,7 @@ public StorageLocationReport[] getStorageReport() { .setCapacity(capacity) .setRemaining(remaining) .setScmUsed(scmUsed) + .setCommitted(committed) .setStorageType(volume.getStorageType()); StorageLocationReport r = builder.build(); reports[counter++] = r; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index c90dcea81ff2..18e7354ec1da 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -142,4 +142,9 @@ public long getTotalCapacity() { return (getUsed() + getAvailable() + getReserved()); } + @Metric("Returns the Committed bytes of the Volume") + public long getCommitted() { + return volume.getCommittedBytes(); + } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index e7a06abc9e36..57cf0a8b9dd0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -162,6 +162,14 @@ public static long getMinVolumeFreeSpace(ConfigurationSource conf, } + public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace, + long volumeCommittedBytesCount, + long requiredSpace, + long volumeFreeSpaceToSpare) { + return (volumeAvailableSpace - 
volumeCommittedBytesCount) > + Math.max(requiredSpace, volumeFreeSpaceToSpare); + } + /** * Class representing precomputed space values of a volume. * This class is intended to store precomputed values, such as capacity diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 1e34fb104939..277ab4464e30 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -524,7 +524,7 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() = StorageContainerDatanodeProtocolProtos. NodeReportProto.newBuilder(); for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); + nrb.addStorageReport(reports[i].getProtoBufMessage(config)); } StorageLocationReport[] metaReports = metaVolumeSet.getStorageReport(); diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml index 3b41132f5f57..9ff328776657 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml +++ b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml @@ -1433,6 +1433,9 @@ components: remaining: type: number example: 1080410456064 + committed: + type: number + example: 1080410456 containers: type: integer example: 26 @@ -1480,6 +1483,9 @@ components: remaining: type: number example: 270071111680 + committed: + type: number + example: 27007111 pipelines: type: array items: diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 5c20745c061e..3f346300b3ed 100644 --- 
a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -187,6 +187,8 @@ message DatanodeUsageInfoProto { optional int64 remaining = 3; optional DatanodeDetailsProto node = 4; optional int64 containerCount = 5; + optional int64 committed = 6; + optional int64 freeSpaceToSpare = 7; } /** diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index de9e39789b51..2994073c0240 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -179,6 +179,8 @@ message StorageReportProto { optional uint64 remaining = 5 [default = 0]; optional StorageTypeProto storageType = 6 [default = DISK]; optional bool failed = 7 [default = false]; + optional uint64 committed = 8 [default = 0]; + optional uint64 freeSpaceToSpare = 9 [default = 0]; } message MetadataStorageReportProto { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 90140c44810b..78b1a031b8e8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; +import org.apache.hadoop.ozone.container.common.volume.VolumeUsage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -274,7 +275,7 @@ public List filterNodesWithSpace(List nodes, int nodesRequired, long metadataSizeRequired, long dataSizeRequired) throws SCMException { 
List nodesWithSpace = nodes.stream().filter(d -> - hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired)) + hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired, conf)) .collect(Collectors.toList()); if (nodesWithSpace.size() < nodesRequired) { @@ -298,7 +299,9 @@ public List filterNodesWithSpace(List nodes, * @return true if we have enough space. */ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long metadataSizeRequired, long dataSizeRequired) { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) { Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo); boolean enoughForData = false; @@ -308,7 +311,9 @@ public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, if (dataSizeRequired > 0) { for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) { - if (reportProto.getRemaining() > dataSizeRequired) { + if (VolumeUsage.hasVolumeEnoughSpace(reportProto.getRemaining(), + reportProto.getCommitted(), dataSizeRequired, + reportProto.getFreeSpaceToSpare())) { enoughForData = true; break; } @@ -494,7 +499,7 @@ public boolean isValidNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus = datanodeInfo.getNodeStatus(); if (nodeStatus.isNodeWritable() && (hasEnoughSpace(datanodeInfo, metadataSizeRequired, - dataSizeRequired))) { + dataSizeRequired, conf))) { LOG.debug("Datanode {} is chosen. 
Required metadata size is {} and " + "required data size is {} and NodeStatus is {}", datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java index 6541d75d2793..abbc50ac86a5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java @@ -932,7 +932,7 @@ private long ratioToBytes(Long nodeCapacity, double utilizationRatio) { return 0; } SCMNodeStat aggregatedStats = new SCMNodeStat( - 0, 0, 0); + 0, 0, 0, 0, 0); for (DatanodeUsageInfo node : nodes) { aggregatedStats.add(node.getScmNodeStat()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java index d6857d395cfb..eedc89dfc585 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java @@ -42,6 +42,18 @@ interface NodeStat { */ LongMetric getRemaining(); + /** + * Get the committed space of the node. + * @return the committed space of the node + */ + LongMetric getCommitted(); + + /** + * Get a min free space available to spare on the node. + * @return a min free space available to spare + */ + LongMetric getFreeSpaceToSpare(); + /** * Set the total/used/remaining space. * @param capacity - total space. @@ -49,7 +61,8 @@ interface NodeStat { * @param remain - remaining space. 
*/ @VisibleForTesting - void set(long capacity, long used, long remain); + void set(long capacity, long used, long remain, long committed, + long freeSpaceToSpare); /** * Adding of the stat. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index 2f5c6f33f73e..330bf67416ae 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -36,16 +36,19 @@ public SCMNodeMetric(SCMNodeStat stat) { } /** - * Set the capacity, used and remaining space on a datanode. + * Set the capacity, used, remaining and committed space on a datanode. * - * @param capacity in bytes - * @param used in bytes + * @param capacity  in bytes + * @param used      in bytes * @param remaining in bytes + * @param committed in bytes + * @param freeSpaceToSpare in bytes */ @VisibleForTesting - public SCMNodeMetric(long capacity, long used, long remaining) { + public SCMNodeMetric(long capacity, long used, long remaining, + long committed, long freeSpaceToSpare) { this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining); + this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare); } /** @@ -156,7 +159,8 @@ public SCMNodeStat get() { @Override public void set(SCMNodeStat value) { stat.set(value.getCapacity().get(), value.getScmUsed().get(), - value.getRemaining().get()); + value.getRemaining().get(), value.getCommitted().get(), + value.getFreeSpaceToSpare().get()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 962bbb464ecc..2a848a04eff5
100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -28,16 +28,20 @@ public class SCMNodeStat implements NodeStat { private LongMetric capacity; private LongMetric scmUsed; private LongMetric remaining; + private LongMetric committed; + private LongMetric freeSpaceToSpare; public SCMNodeStat() { - this(0L, 0L, 0L); + this(0L, 0L, 0L, 0L, 0L); } public SCMNodeStat(SCMNodeStat other) { - this(other.capacity.get(), other.scmUsed.get(), other.remaining.get()); + this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(), + other.committed.get(), other.freeSpaceToSpare.get()); } - public SCMNodeStat(long capacity, long used, long remaining) { + public SCMNodeStat(long capacity, long used, long remaining, long committed, + long freeSpaceToSpare) { Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(used >= 0, "used space cannot be " + @@ -47,6 +51,8 @@ public SCMNodeStat(long capacity, long used, long remaining) { this.capacity = new LongMetric(capacity); this.scmUsed = new LongMetric(used); this.remaining = new LongMetric(remaining); + this.committed = new LongMetric(committed); + this.freeSpaceToSpare = new LongMetric(freeSpaceToSpare); } /** @@ -73,6 +79,24 @@ public LongMetric getRemaining() { return remaining; } + /** + * Get the total committed space on the node. + * @return the total committed space on the node + */ + @Override + public LongMetric getCommitted() { + return committed; + } + + /** + * Get a min space available to spare on the node. + * @return a min free space available to spare on the node + */ + @Override + public LongMetric getFreeSpaceToSpare() { + return freeSpaceToSpare; + } + /** * Set the capacity, used and remaining space on a datanode.
* @@ -82,7 +106,8 @@ public LongMetric getRemaining() { */ @Override @VisibleForTesting - public void set(long newCapacity, long newUsed, long newRemaining) { + public void set(long newCapacity, long newUsed, long newRemaining, + long newCommitted, long newFreeSpaceToSpare) { Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + "negative."); Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + @@ -93,6 +118,8 @@ public void set(long newCapacity, long newUsed, long newRemaining) { this.capacity = new LongMetric(newCapacity); this.scmUsed = new LongMetric(newUsed); this.remaining = new LongMetric(newRemaining); + this.committed = new LongMetric(newCommitted); + this.freeSpaceToSpare = new LongMetric(newFreeSpaceToSpare); } /** @@ -106,6 +133,9 @@ public SCMNodeStat add(NodeStat stat) { this.capacity.set(this.getCapacity().get() + stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); + this.committed.set(this.getCommitted().get() + stat.getCommitted().get()); + this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() + + stat.getFreeSpaceToSpare().get()); return this; } @@ -120,6 +150,9 @@ public SCMNodeStat subtract(NodeStat stat) { this.capacity.set(this.getCapacity().get() - stat.getCapacity().get()); this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); + this.committed.set(this.getCommitted().get() - stat.getCommitted().get()); + this.freeSpaceToSpare.set(freeSpaceToSpare.get() - + stat.getFreeSpaceToSpare().get()); return this; } @@ -129,13 +162,16 @@ public boolean equals(Object to) { SCMNodeStat tempStat = (SCMNodeStat) to; return capacity.isEqual(tempStat.getCapacity().get()) && scmUsed.isEqual(tempStat.getScmUsed().get()) && - remaining.isEqual(tempStat.getRemaining().get()); + remaining.isEqual(tempStat.getRemaining().get()) 
&& + committed.isEqual(tempStat.getCommitted().get()) && + freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get()); } return false; } @Override public int hashCode() { - return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get()); + return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^ + committed.get() ^ freeSpaceToSpare.get()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java index 14353cfa7e37..4f7df4969063 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java @@ -205,6 +205,8 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int clientVersion) { builder.setCapacity(scmNodeStat.getCapacity().get()); builder.setUsed(scmNodeStat.getScmUsed().get()); builder.setRemaining(scmNodeStat.getRemaining().get()); + builder.setCommitted(scmNodeStat.getCommitted().get()); + builder.setFreeSpaceToSpare(scmNodeStat.getFreeSpaceToSpare().get()); } builder.setContainerCount(containerCount); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 167b25afd01c..59ac09103250 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -855,13 +855,18 @@ public SCMNodeStat getStats() { long capacity = 0L; long used = 0L; long remaining = 0L; + long committed = 0L; + long freeSpaceToSpare = 0L; for (SCMNodeStat stat : getNodeStats().values()) { capacity += stat.getCapacity().get(); used += stat.getScmUsed().get(); remaining += stat.getRemaining().get(); + committed += 
stat.getCommitted().get(); + freeSpaceToSpare += stat.getFreeSpaceToSpare().get(); } - return new SCMNodeStat(capacity, used, remaining); + return new SCMNodeStat(capacity, used, remaining, committed, + freeSpaceToSpare); } /** @@ -966,6 +971,8 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { long capacity = 0L; long used = 0L; long remaining = 0L; + long committed = 0L; + long freeSpaceToSpare = 0L; final DatanodeInfo datanodeInfo = nodeStateManager .getNode(datanodeDetails); @@ -975,8 +982,11 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { capacity += reportProto.getCapacity(); used += reportProto.getScmUsed(); remaining += reportProto.getRemaining(); + committed += reportProto.getCommitted(); + freeSpaceToSpare += reportProto.getFreeSpaceToSpare(); } - return new SCMNodeStat(capacity, used, remaining); + return new SCMNodeStat(capacity, used, remaining, committed, + freeSpaceToSpare); } catch (NodeNotFoundException e) { LOG.warn("Cannot generate NodeStat, datanode {} not found.", datanodeDetails.getUuidString()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index 01d3e71150d3..c59128286917 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -85,12 +86,15 @@ protected abstract Pipeline createForRead( protected abstract void shutdown(); List 
pickNodesNotUsed(REPLICATION_CONFIG replicationConfig, - long metadataSizeRequired, long dataSizeRequired) throws SCMException { + long metadataSizeRequired, + long dataSizeRequired, + ConfigurationSource conf) + throws SCMException { int nodesRequired = replicationConfig.getRequiredNodes(); List healthyDNs = pickAllNodesNotUsed(replicationConfig); List healthyDNsWithSpace = healthyDNs.stream() .filter(dn -> SCMCommonPlacementPolicy - .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired)) + .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired, conf)) .limit(nodesRequired) .collect(Collectors.toList()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 1b62120c1ee7..8336bce5eae7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -163,7 +163,7 @@ public synchronized Pipeline create(RatisReplicationConfig replicationConfig, switch (factor) { case ONE: dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes, - containerSizeBytes); + containerSizeBytes, conf); break; case THREE: List excludeDueToEngagement = filterPipelineEngagement(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java index ffefc7c5f5db..87497a9f0709 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java @@ -24,30 +24,40 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.Mockito; - import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.util.function.Function; import java.util.stream.Stream; @@ -448,11 +458,66 @@ protected List chooseDatanodesInternal( } }; dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1); - Assertions.assertFalse(usedNodesIdentity.get()); + assertFalse(usedNodesIdentity.get()); 
dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1); Assertions.assertTrue(usedNodesIdentity.get()); } + @Test + public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() { + NodeManager nodeMngr = mock(NodeManager.class); + UUID datanodeUuid = UUID.randomUUID(); + DummyPlacementPolicy placementPolicy = + new DummyPlacementPolicy(nodeMngr, conf, 1); + DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); + when(datanodeDetails.getUuid()).thenReturn(datanodeUuid); + + DatanodeInfo datanodeInfo = mock(DatanodeInfo.class); + NodeStatus nodeStatus = mock(NodeStatus.class); + when(nodeStatus.isNodeWritable()).thenReturn(true); + when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus); + when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo); + + // capacity = 200000, used = 90000, remaining = 101000, committed = 500 + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 = + HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", + 200000, 90000, 101000, DISK).toBuilder() + .setCommitted(500) + .setFreeSpaceToSpare(10000) + .build(); + // capacity = 200000, used = 90000, remaining = 101000, committed = 1000 + StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 = + HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds", + 200000, 90000, 101000, DISK).toBuilder() + .setCommitted(1000) + .setFreeSpaceToSpare(100000) + .build(); + StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto + metaReport = HddsTestUtils.createMetadataStorageReport("/data/metadata", + 200); + when(datanodeInfo.getStorageReports()) + .thenReturn(Collections.singletonList(storageReport1)) + .thenReturn(Collections.singletonList(storageReport2)); + when(datanodeInfo.getMetadataStorageReports()) + .thenReturn(Collections.singletonList(metaReport)); + + + // 500 committed bytes: + // + // 101000 500 + // | | + // (remaining - committed) > Math.max(4000, freeSpaceToSpare) + // | + // 
100000 + // + // Summary: 101000 - 500 > 100000 == true + assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + + // 1000 committed bytes: + // Summary: 101000 - 1000 > 100000 == false + assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000)); + } + private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { private Map rackMap; private List racks; @@ -485,7 +550,7 @@ private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy { super(nodeManager, conf); this.rackCnt = rackCnt; this.racks = IntStream.range(0, rackCnt) - .mapToObj(i -> Mockito.mock(Node.class)).collect(Collectors.toList()); + .mapToObj(i -> mock(Node.class)).collect(Collectors.toList()); List datanodeDetails = nodeManager.getAllNodes(); rackMap = datanodeRackMap.entrySet().stream() .collect(Collectors.toMap( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 98638ebe009d..794dedceef06 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -227,7 +227,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; newStat.set( (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining); + (NODES[x % NODES.length].used), remaining, 0, 100000); this.nodeMetricMap.put(datanodeDetails, newStat); aggregateStat.add(newStat); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 4bc3cf43cf6e..56d02dabb5fa 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -1207,7 +1207,8 @@ private double createCluster() { datanodeCapacity = (long) (datanodeUsedSpace / nodeUtilizations.get(i)); } SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace, - datanodeCapacity - datanodeUsedSpace); + datanodeCapacity - datanodeUsedSpace, 0, + datanodeCapacity - datanodeUsedSpace - 1); nodesInCluster.get(i).setScmNodeStat(stat); clusterUsedSpace += datanodeUsedSpace; clusterCapacity += datanodeCapacity; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java index 7e734042d883..bb6f17bcc105 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java @@ -56,11 +56,11 @@ public void testFindTargetGreedyByUsage() { //create three datanodes with different usageinfo DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80)); + .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30)); //insert in ascending order overUtilizedDatanodes.add(dui1); @@ -98,11 +98,11 @@ public void 
testFindTargetGreedyByUsage() { public void testResetPotentialTargets() { // create three datanodes with different usage infos DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70)); + .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50)); DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80)); + .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60)); DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails - .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90)); + .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70)); List potentialTargets = new ArrayList<>(); potentialTargets.add(dui1); @@ -179,18 +179,18 @@ public void testFindTargetGreedyByNetworkTopology() { List overUtilizedDatanodes = new ArrayList<>(); //set the farthest target with the lowest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90))); + new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80))); //set the tree targets, which have the same network topology distance //to source , with different usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20))); + new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40))); + new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30))); overUtilizedDatanodes.add( - new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60))); + new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50))); //set the nearest target with the highest usage info overUtilizedDatanodes.add( - new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10))); + new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5))); FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology = diff 
--git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index 910fe75ede6c..e51f9731ad4a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -103,13 +103,13 @@ public void chooseDatanodes() throws SCMException { .thenReturn(new ArrayList<>(datanodes)); when(mockNodeManager.getNodeStat(any())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); + .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90)); when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); + .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9)); when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L)); + .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19)); when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L)); + .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20)); when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer( invocation -> datanodes.stream() .filter(dn -> dn.getUuid().equals(invocation.getArgument(0))) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index 6ba2fc440a4f..9c9bfad582f7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ -31,13 
+31,13 @@ public class TestDatanodeMetrics { @Test public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L); + SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80); assertEquals((long) stat.getCapacity().get(), 100L); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); SCMNodeMetric metric = new SCMNodeMetric(stat); - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L); + SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80); assertEquals(100L, (long) stat.getCapacity().get()); assertEquals(10L, (long) stat.getScmUsed().get()); assertEquals(90L, (long) stat.getRemaining().get()); @@ -53,8 +53,8 @@ public void testSCMNodeMetric() { assertTrue(metric.isGreater(zeroMetric.get())); // Another case when nodes have similar weight - SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L); - SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L); + SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000); + SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000); assertTrue(new SCMNodeMetric(stat2).isGreater(stat1)); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java index d46513b24bbd..b967fa0658c0 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java @@ -155,8 +155,16 @@ private void printInfo(DatanodeUsage info) { + " B", StringUtils.byteDesc(info.getRemaining())); System.out.printf("%-13s: %s %n", "Remaining %", PERCENT_FORMAT.format(info.getRemainingRatio())); - System.out.printf("%-13s: %d %n%n", "Container(s)", + System.out.printf("%-13s: %d %n", "Container(s)", info.getContainerCount()); + 
System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated", + info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted())); + System.out.printf("%-24s: %s (%s) %n", "Remaining Allocatable", + (info.getRemaining() - info.getCommitted()) + " B", + StringUtils.byteDesc((info.getRemaining() - info.getCommitted()))); + System.out.printf("%-24s: %s (%s) %n%n", "Free Space To Spare", + info.getFreeSpaceToSpare() + " B", + StringUtils.byteDesc(info.getFreeSpaceToSpare())); } /** @@ -181,6 +189,8 @@ private static class DatanodeUsage { private long capacity = 0; private long used = 0; private long remaining = 0; + private long committed = 0; + private long freeSpaceToSpare = 0; private long containerCount = 0; DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) { @@ -196,9 +206,15 @@ private static class DatanodeUsage { if (proto.hasRemaining()) { remaining = proto.getRemaining(); } + if (proto.hasCommitted()) { + committed = proto.getCommitted(); + } if (proto.hasContainerCount()) { containerCount = proto.getContainerCount(); } + if (proto.hasFreeSpaceToSpare()) { + freeSpaceToSpare = proto.getFreeSpaceToSpare(); + } } public DatanodeDetails getDatanodeDetails() { @@ -220,6 +236,12 @@ public long getOzoneUsed() { public long getRemaining() { return remaining; } + public long getCommitted() { + return committed; + } + public long getFreeSpaceToSpare() { + return freeSpaceToSpare; + } public long getContainerCount() { return containerCount; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java index 0cc8ed9be639..a52a0a7ed8f5 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java @@ -19,6 +19,7 @@ import 
com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.codec.CharEncoding; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.client.ScmClient; @@ -97,6 +98,38 @@ public void testCorrectJsonValuesInReport() throws IOException { json.get(0).get("containerCount").longValue()); } + @Test + public void testOutputDataFieldsAligning() throws IOException { + // given + ScmClient scmClient = mock(ScmClient.class); + Mockito.when(scmClient.getDatanodeUsageInfo( + Mockito.anyBoolean(), Mockito.anyInt())) + .thenAnswer(invocation -> getUsageProto()); + + CommandLine c = new CommandLine(cmd); + c.parseArgs("-m"); + + // when + cmd.execute(scmClient); + + // then + String output = outContent.toString(CharEncoding.UTF_8); + Assertions.assertTrue(output.contains("UUID :")); + Assertions.assertTrue(output.contains("IP Address :")); + Assertions.assertTrue(output.contains("Hostname :")); + Assertions.assertTrue(output.contains("Capacity :")); + Assertions.assertTrue(output.contains("Total Used :")); + Assertions.assertTrue(output.contains("Total Used % :")); + Assertions.assertTrue(output.contains("Ozone Used :")); + Assertions.assertTrue(output.contains("Ozone Used % :")); + Assertions.assertTrue(output.contains("Remaining :")); + Assertions.assertTrue(output.contains("Remaining % :")); + Assertions.assertTrue(output.contains("Container(s) :")); + Assertions.assertTrue(output.contains("Container Pre-allocated :")); + Assertions.assertTrue(output.contains("Remaining Allocatable :")); + Assertions.assertTrue(output.contains("Free Space To Spare :")); + } + private List getUsageProto() { List result = new ArrayList<>(); result.add(HddsProtos.DatanodeUsageInfoProto.newBuilder() diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java index bc87c402eb29..b074e5ba56a6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java @@ -120,7 +120,8 @@ public Response getClusterState() { SCMNodeStat stats = nodeManager.getStats(); DatanodeStorageReport storageReport = new DatanodeStorageReport(stats.getCapacity().get(), - stats.getScmUsed().get(), stats.getRemaining().get()); + stats.getScmUsed().get(), stats.getRemaining().get(), + stats.getCommitted().get()); ClusterStateResponse.Builder builder = ClusterStateResponse.newBuilder(); GlobalStats volumeRecord = globalStatsDao.findById( diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index 33df0ca1bd5f..968bfbc46343 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -168,6 +168,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) { long capacity = nodeStat.getCapacity().get(); long used = nodeStat.getScmUsed().get(); long remaining = nodeStat.getRemaining().get(); - return new DatanodeStorageReport(capacity, used, remaining); + long committed = nodeStat.getCommitted().get(); + return new DatanodeStorageReport(capacity, used, remaining, committed); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java index d3fbb598c1b2..43a20317a29e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java +++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java @@ -24,11 +24,14 @@ public class DatanodeStorageReport { private long capacity; private long used; private long remaining; + private long committed; - public DatanodeStorageReport(long capacity, long used, long remaining) { + public DatanodeStorageReport(long capacity, long used, long remaining, + long committed) { this.capacity = capacity; this.used = used; this.remaining = remaining; + this.committed = committed; } public long getCapacity() { @@ -42,4 +45,8 @@ public long getUsed() { public long getRemaining() { return remaining; } + + public long getCommitted() { + return committed; + } } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 60362299fa55..204609f66fec 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -29,7 +29,8 @@ "storageReport": { "capacity": 62725623808, "used": 488288256, - "remaining": 21005319168 + "remaining": 21005319168, + "committed": 10240000 }, "pipelines": [ { @@ -62,7 +63,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 34563456 }, "pipelines": [ { @@ -95,7 +97,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 34562 }, "pipelines": [ { @@ -128,7 +131,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 4576435 }, "pipelines": [ { @@ -161,7 +165,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 95784247808 + "remaining": 95784247808, + "committed": 3453121 }, "pipelines": 
[ { @@ -194,7 +199,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 3457623435 }, "pipelines": [ { @@ -233,7 +239,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 345624 }, "pipelines": [ { @@ -272,7 +279,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 123464574 }, "pipelines": [ { @@ -311,7 +319,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 556721345 }, "pipelines": [ { @@ -350,7 +359,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 86757023244288 + "remaining": 86757023244288, + "committed": 45671235234 }, "pipelines": [ { @@ -389,7 +399,8 @@ "storageReport": { "capacity": 140737488355328, "used": 0, - "remaining": 110737488355328 + "remaining": 110737488355328, + "committed": 0 }, "pipelines": [], "containers": 0, @@ -409,7 +420,8 @@ "storageReport": { "capacity": 805306368000, "used": 644245094400, - "remaining": 121061273600 + "remaining": 121061273600, + "committed": 4572345234 }, "pipelines": [ { @@ -442,7 +454,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 92757023244288 + "remaining": 92757023244288, + "committed": 34563453 }, "pipelines": [ { @@ -475,7 +488,8 @@ "storageReport": { "capacity": 549755813888, "used": 450971566080, - "remaining": 94784247808 + "remaining": 94784247808, + "committed": 7234234 }, "pipelines": [ { @@ -514,7 +528,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 92757023244288 + "remaining": 92757023244288, + "committed": 34562346 }, "pipelines": [ { @@ -547,7 +562,8 @@ "storageReport": { 
"capacity": 140737488355328, "used": 43980465111040, - "remaining": 76757023244288 + "remaining": 76757023244288, + "committed": 834324523 }, "pipelines": [ { @@ -580,7 +596,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 66757023244288 + "remaining": 66757023244288, + "committed": 346467345 }, "pipelines": [ { @@ -619,7 +636,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 96157023244288 + "remaining": 96157023244288, + "committed": 45245456 }, "pipelines": [ { @@ -652,7 +670,8 @@ "storageReport": { "capacity": 140737488355328, "used": 43980465111040, - "remaining": 94757023244288 + "remaining": 94757023244288, + "committed": 45673234 }, "pipelines": [ { diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less index b2dddbcaa37d..ecba534cc08c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less @@ -19,6 +19,7 @@ @progress-gray: #d0d0d0; @progress-blue: #1890ff; @progress-green: #52c41a; +@progress-dark-grey: #424242; .storage-cell-container { position: relative; @@ -45,3 +46,7 @@ .remaining-bg { color: @progress-gray; } + +.committed-bg { + color: @progress-dark-grey; +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx index 10decce103fa..9263c6817beb 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx +++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx @@ -32,6 +32,7 @@ interface IStorageBarProps extends RouteComponentProps { total: number; used: number; remaining: number; + committed: number; showMeta?: boolean; } @@ -39,6 +40,7 @@ const defaultProps = { total: 0, used: 0, remaining: 0, + committed: 0, showMeta: true }; @@ -46,7 +48,7 @@ class StorageBar extends React.Component { static defaultProps = defaultProps; render() { - const {total, used, remaining, showMeta} = this.props; + const {total, used, remaining, committed, showMeta} = this.props; const nonOzoneUsed = total - remaining - used; const totalUsed = total - remaining; const tooltip = ( @@ -54,6 +56,7 @@ class StorageBar extends React.Component {
Ozone Used ({size(used)})
Non Ozone Used ({size(nonOzoneUsed)})
Remaining ({size(remaining)})
+
Container Pre-allocated ({size(committed)})
); const metaElement = showMeta ?
{size(used)} + {size(nonOzoneUsed)} / {size(total)}
: null; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx index 8f92742916f3..d69466ac0fea 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx @@ -30,4 +30,5 @@ export interface IStorageReport { capacity: number; used: number; remaining: number; + committed: number; } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index e418bf2fefd0..4d80bc7d2dda 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -69,6 +69,7 @@ interface IDatanode { storageUsed: number; storageTotal: number; storageRemaining: number; + storageCommitted: number; pipelines: IPipeline[]; containers: number; openContainers: number; @@ -173,7 +174,7 @@ const COLUMNS = [ render: (text: string, record: IDatanode) => ( + remaining={record.storageRemaining} committed={record.storageCommitted}/> )}, { title: 'Last Heartbeat', @@ -358,6 +359,7 @@ export class Datanodes extends React.Component, IDatanode storageUsed: datanode.storageReport.used, storageTotal: datanode.storageReport.capacity, storageRemaining: datanode.storageReport.remaining, + storageCommitted: datanode.storageReport.committed, pipelines: datanode.pipelines, containers: datanode.containers, openContainers: datanode.openContainers, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java index d3bee19ba6e9..cbe850b918f0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java @@ -1248,6 +1248,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index b324bd6b4276..ba00f843f447 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -1286,6 +1286,6 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1); } }