diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java index f7a99b9b1ee9..0f188e53b137 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java @@ -55,7 +55,7 @@ protected DbVolume(Builder b) throws IOException { super(b); this.hddsDbStorePathMap = new HashMap<>(); - if (!b.getFailedVolume() && getVolumeInfo().isPresent()) { + if (!b.getFailedVolume()) { LOG.info("Creating DbVolume: {} of storage type: {}, {}", getStorageDir(), b.getStorageType(), getCurrentUsage()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 5513363d2a60..b88961e04ae4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -49,7 +49,7 @@ /** * HddsVolume represents volume in a datanode. {@link MutableVolumeSet} * maintains a list of HddsVolumes, one for each volume in the Datanode. - * {@link VolumeInfo} in encompassed by this class. + * {@link VolumeUsage} in encompassed by this class. *
* The disk layout per volume is as follows: *
../hdds/VERSION
@@ -119,7 +119,7 @@ public HddsVolume build() throws IOException {
private HddsVolume(Builder b) throws IOException {
super(b);
- if (!b.getFailedVolume() && getVolumeInfo().isPresent()) {
+ if (!b.getFailedVolume()) {
this.setState(VolumeState.NOT_INITIALIZED);
ConfigurationSource conf = getConf();
int[] intervals = conf.getInts(OZONE_DATANODE_IO_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index f65d89e4cfe8..2b41c4872de2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -277,7 +277,7 @@ public void checkVolumeAsync(StorageVolume volume) {
}
public void refreshAllVolumeUsage() {
- volumeMap.forEach((k, v) -> v.refreshVolumeInfo());
+ volumeMap.forEach((k, v) -> v.refreshVolumeUsage());
}
/**
@@ -464,18 +464,17 @@ public StorageLocationReport[] getStorageReport() {
StorageVolume volume;
for (Map.Entry
- * {@code
- * |----used----| (avail) |++mvfs++|++++reserved+++++++|
- * |<- capacity ->|
- * | fsAvail |-------other-----------|
- * |<- fsCapacity ->|
- * }
- *
- * What we could directly get from local fs:
- * fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
- * We could get from config:
- * reserved
- * Get from cmd line:
- * used: from cmd 'du' (by default)
- * Get from calculation:
- * capacity = fsCapacity - reserved
- * other = fsUsed - used
- *
- * The avail is the result we want from calculation.
- * So, it could be:
- * A) avail = capacity - used
- * B) avail = fsAvail - Max(reserved - other, 0);
- *
- * To be Conservative, we could get min
- * avail = Max(Min(A, B), 0);
- *
- * If we have a dedicated disk for hdds and are not using the reserved space,
- * then we should use DedicatedDiskSpaceUsage for
- * `hdds.datanode.du.factory.classname`,
- * Then it is much simpler, since we don't care about other usage:
- * {@code
- * |----used----| (avail)/fsAvail |
- * |<- capacity/fsCapacity ->|
- * }
- *
- * We have avail == fsAvail.
- *
- */
-public final class VolumeInfo {
-
- private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class);
-
- private final String rootDir;
- private final StorageType storageType;
-
- // Space usage calculator
- private final VolumeUsage usage;
-
- /**
- * Builder for VolumeInfo.
- */
- public static class Builder {
- private final ConfigurationSource conf;
- private final String rootDir;
- private SpaceUsageCheckFactory usageCheckFactory;
- private StorageType storageType;
-
- public Builder(String root, ConfigurationSource config) {
- this.rootDir = root;
- this.conf = config;
- }
-
- public Builder storageType(StorageType st) {
- this.storageType = st;
- return this;
- }
-
- public Builder usageCheckFactory(SpaceUsageCheckFactory factory) {
- this.usageCheckFactory = factory;
- return this;
- }
-
- public VolumeInfo build() throws IOException {
- return new VolumeInfo(this);
- }
- }
-
- private VolumeInfo(Builder b) throws IOException {
-
- this.rootDir = b.rootDir;
- File root = new File(this.rootDir);
-
- boolean succeeded = root.isDirectory() || root.mkdirs();
-
- if (!succeeded) {
- LOG.error("Unable to create the volume root dir at : {}", root);
- throw new IOException("Unable to create the volume root dir at " + root);
- }
-
- this.storageType = (b.storageType != null ?
- b.storageType : StorageType.DEFAULT);
-
- SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
- if (usageCheckFactory == null) {
- usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
- }
- SpaceUsageCheckParams checkParams =
- usageCheckFactory.paramsFor(root);
-
- usage = new VolumeUsage(checkParams, b.conf);
- }
-
- public SpaceUsageSource getCurrentUsage() {
- return usage.getCurrentUsage();
- }
-
- public void incrementUsedSpace(long usedSpace) {
- usage.incrementUsedSpace(usedSpace);
- }
-
- public void decrementUsedSpace(long reclaimedSpace) {
- usage.decrementUsedSpace(reclaimedSpace);
- }
-
- public void refreshNow() {
- usage.refreshNow();
- }
-
- void shutdownUsageThread() {
- usage.shutdown();
- }
-
- public String getRootDir() {
- return this.rootDir;
- }
-
- public StorageType getStorageType() {
- return this.storageType;
- }
-
- /**
- * Only for testing. Do not use otherwise.
- */
- @VisibleForTesting
- public VolumeUsage getUsageForTesting() {
- return usage;
- }
-
- public long getReservedInBytes() {
- return usage.getReservedBytes();
- }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
index 7a9b465a68e9..c864f416cdb4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
@@ -131,9 +131,9 @@ public long getContainers() {
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder builder = collector.addRecord(metricsSourceName);
registry.snapshot(builder, all);
- volume.getVolumeInfo().ifPresent(volumeInfo -> {
- SpaceUsageSource usage = volumeInfo.getCurrentUsage();
- long reserved = volumeInfo.getReservedInBytes();
+ volume.getVolumeUsage().ifPresent(volumeUsage -> {
+ SpaceUsageSource usage = volumeUsage.getCurrentUsage();
+ long reserved = volumeUsage.getReservedInBytes();
builder
.addGauge(CAPACITY, usage.getCapacity())
.addGauge(AVAILABLE, usage.getAvailable())
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 6c4f7bf35cbe..0fcab327a4d7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -36,8 +36,61 @@
import org.slf4j.LoggerFactory;
/**
- * Class that wraps the space df of the Datanode Volumes used by SCM
- * containers.
+ * Stores information about a disk/volume.
+ *
+ * Since we have a reserved space for each volume for other usage,
+ * let's clarify the space values a bit here:
+ * - used: hdds actual usage.
+ * - avail: remaining space for hdds usage.
+ * - reserved: total space for other usage.
+ * - capacity: total space for hdds usage.
+ * - other: space used by other services consuming the same volume.
+ * - fsAvail: reported remaining space from local fs.
+ * - fsUsed: reported total used space from local fs.
+ * - fsCapacity: reported total capacity from local fs.
+ * - minVolumeFreeSpace (mvfs) : determines the free space for closing
+ containers. This is like adding a few reserved bytes to reserved space.
+ Dn's will send close container action to SCM at this limit, and it is
+ configurable.
+
+ *
+ *
+ * {@code
+ * |----used----| (avail) |++mvfs++|++++reserved+++++++|
+ * |<- capacity ->|
+ * | fsAvail |-------other-----------|
+ * |<- fsCapacity ->|
+ * }
+ *
+ * What we could directly get from local fs:
+ * fsCapacity, fsAvail, (fsUsed = fsCapacity - fsAvail)
+ * We could get from config:
+ * reserved
+ * Get from cmd line:
+ * used: from cmd 'du' (by default)
+ * Get from calculation:
+ * capacity = fsCapacity - reserved
+ * other = fsUsed - used
+ *
+ * The avail is the result we want from calculation.
+ * So, it could be:
+ * A) avail = capacity - used
+ * B) avail = fsAvail - Max(reserved - other, 0);
+ *
+ * To be conservative, we could get min
+ * avail = Max(Min(A, B), 0);
+ *
+ * If we have a dedicated disk for hdds and are not using the reserved space,
+ * then we should use DedicatedDiskSpaceUsage for
+ * `hdds.datanode.du.factory.classname`,
+ * Then it is much simpler, since we don't care about other usage:
+ * {@code
+ * |----used----| (avail)/fsAvail |
+ * |<- capacity/fsCapacity ->|
+ * }
+ *
+ * We have avail == fsAvail.
+ *
*/
public class VolumeUsage {
@@ -120,7 +173,7 @@ public void refreshNow() {
source.refreshNow();
}
- public long getReservedBytes() {
+ public long getReservedInBytes() {
return reservedInBytes;
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 1243b5d46b0b..152789751157 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -289,8 +289,8 @@ public void testContainerCloseActionWhenVolumeFull(
HddsDispatcher hddsDispatcher = new HddsDispatcher(
conf, containerSet, volumeSet, handlers, context, metrics, null);
hddsDispatcher.setClusterId(scmId.toString());
- containerData.getVolume().getVolumeInfo()
- .ifPresent(volumeInfo -> volumeInfo.incrementUsedSpace(50));
+ containerData.getVolume().getVolumeUsage()
+ .ifPresent(usage -> usage.incrementUsedSpace(50));
usedSpace.addAndGet(50);
ContainerCommandResponseProto response = hddsDispatcher
.dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
index bcc123520e02..55fc68f4f56b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
@@ -69,11 +69,11 @@ public void testDefaultConfig() throws Exception {
// Gets the total capacity reported by Ozone, which may be limited to less than the volume's real capacity by the
// DU reserved configurations.
long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity();
- VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting();
+ VolumeUsage usage = hddsVolume.getVolumeUsage().get();
// Gets the actual total capacity without accounting for DU reserved space configurations.
long totalCapacity = usage.realUsage().getCapacity();
- long reservedCapacity = usage.getReservedBytes();
+ long reservedCapacity = usage.getReservedInBytes();
assertEquals(getExpectedDefaultReserved(hddsVolume), reservedCapacity);
assertEquals(totalCapacity - reservedCapacity, volumeCapacity);
@@ -93,11 +93,11 @@ public void testVolumeCapacityAfterReserve() throws Exception {
HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT);
long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity();
- VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting();
+ VolumeUsage usage = hddsVolume.getVolumeUsage().get();
//Gets the actual total capacity
long totalCapacity = usage.realUsage().getCapacity();
- long reservedCapacity = usage.getReservedBytes();
+ long reservedCapacity = usage.getReservedInBytes();
long reservedCalculated = (long) Math.ceil(totalCapacity * percentage);
assertEquals(reservedCalculated, reservedCapacity);
@@ -117,7 +117,7 @@ public void testReservedWhenBothConfigSet() throws Exception {
folder.toString() + ":500B");
HddsVolume hddsVolume = volumeBuilder.conf(conf).build();
- long reservedFromVolume = hddsVolume.getVolumeInfo().get()
+ long reservedFromVolume = hddsVolume.getVolumeUsage().get()
.getReservedInBytes();
assertEquals(500, reservedFromVolume);
}
@@ -131,8 +131,8 @@ public void testFallbackToPercentConfig() throws Exception {
temp.toString() + ":500B");
HddsVolume hddsVolume = volumeBuilder.conf(conf).build();
- VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting();
- long reservedFromVolume = usage.getReservedBytes();
+ VolumeUsage usage = hddsVolume.getVolumeUsage().get();
+ long reservedFromVolume = usage.getReservedInBytes();
assertNotEquals(0, reservedFromVolume);
long totalCapacity = usage.realUsage().getCapacity();
@@ -151,7 +151,7 @@ public void testInvalidConfig() throws Exception {
folder.toString() + ":500C");
HddsVolume hddsVolume1 = volumeBuilder.conf(conf1).build();
- long reservedFromVolume1 = hddsVolume1.getVolumeInfo().get()
+ long reservedFromVolume1 = hddsVolume1.getVolumeUsage().get()
.getReservedInBytes();
assertEquals(getExpectedDefaultReserved(hddsVolume1), reservedFromVolume1);
@@ -161,7 +161,7 @@ public void testInvalidConfig() throws Exception {
conf2.set(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, "20");
HddsVolume hddsVolume2 = volumeBuilder.conf(conf2).build();
- long reservedFromVolume2 = hddsVolume2.getVolumeInfo().get()
+ long reservedFromVolume2 = hddsVolume2.getVolumeUsage().get()
.getReservedInBytes();
assertEquals(getExpectedDefaultReserved(hddsVolume2), reservedFromVolume2);
}
@@ -188,7 +188,7 @@ public void testPathsCanonicalized() throws Exception {
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, symlink + ":500B");
HddsVolume hddsVolume = volumeBuilder.conf(conf).build();
- long reservedFromVolume = hddsVolume.getVolumeInfo().get().getReservedInBytes();
+ long reservedFromVolume = hddsVolume.getVolumeUsage().get().getReservedInBytes();
assertEquals(500, reservedFromVolume);
}
@@ -212,7 +212,7 @@ public void testMinFreeSpaceCalculator() throws Exception {
private long getExpectedDefaultReserved(HddsVolume volume) {
- long totalCapacity = volume.getVolumeInfo().get().getUsageForTesting().realUsage().getCapacity();
+ long totalCapacity = volume.getVolumeUsage().get().realUsage().getCapacity();
return (long) Math.ceil(totalCapacity * HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT);
}
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index d3fc67d053e7..2f27b5363f4b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -23,7 +23,6 @@
import static org.assertj.core.api.Assumptions.assumeThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
@@ -213,8 +212,7 @@ public void testShutdown() throws Exception {
// Verify that volume usage can be queried during shutdown.
for (StorageVolume volume : volumesList) {
- assertNotNull(volume.getVolumeInfo().get()
- .getUsageForTesting());
+ assertThat(volume.getVolumeUsage()).isPresent();
volume.getCurrentUsage();
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 1ab32f738458..6e26a3ae7ac6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -279,7 +279,7 @@ public void testMultipleDataDirs() throws Exception {
volumeList.forEach(storageVolume -> assertEquals(
(long) StorageSize.parse(reservedSpace).getValue(),
- storageVolume.getVolumeInfo().get().getReservedInBytes()));
+ storageVolume.getVolumeUsage().get().getReservedInBytes()));
}
}
diff --git a/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
index 1029860375dd..328e8a9692c7 100644
--- a/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
+++ b/hadoop-ozone/mini-cluster/src/main/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
@@ -159,7 +159,7 @@ public Builder setNumDataVolumes(int n) {
* for each volume in each datanode.
* @param reservedSpace String that contains the numeric size value and ends with a
* {@link org.apache.hadoop.hdds.conf.StorageUnit} suffix. For example, "50GB".
- * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo
+ * @see org.apache.hadoop.ozone.container.common.volume.VolumeUsage
*/
public Builder setReservedSpace(String reservedSpace) {
this.reservedSpace = reservedSpace;