@@ -175,9 +175,8 @@ public long getReservedInBytes() {
return reservedInBytes;
}

private static long getUsableSpace(
long available, long committed, long minFreeSpace) {
return available - committed - minFreeSpace;
public static long getUsableSpace(long available, long committed, long spared) {
return available - committed - spared;
}

public static long getUsableSpace(StorageReportProto report) {
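The hunk above makes the three-argument getUsableSpace overload public and renames minFreeSpace to spared; the formula is still usable = available - committed - spared. Below is a minimal sketch of that arithmetic with made-up sizes, assuming the edited class is the VolumeUsage that the new DiskBalancerVolumeCalculation code calls later in this PR; the class name UsableSpaceExample is illustrative, not part of the change.

import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;

public final class UsableSpaceExample {
  private UsableSpaceExample() {
  }

  public static void main(String[] args) {
    final long available = 100L << 30; // 100 GiB reported free on the volume (hypothetical)
    final long committed = 20L << 30;  // 20 GiB already committed for container writes (hypothetical)
    final long spared = 5L << 30;      // 5 GiB deliberately kept free (hypothetical)

    // available - committed - spared = 75 GiB
    System.out.println(VolumeUsage.getUsableSpace(available, committed, spared));
  }
}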
@@ -18,16 +18,19 @@
package org.apache.hadoop.ozone.container.diskbalancer;

import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS;
import static org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerVolumeCalculation.calculateVolumeDataDensity;
import static org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerVolumeCalculation.getIdealUsage;
import static org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerVolumeCalculation.getVolumeUsages;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
@@ -65,6 +68,7 @@
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
import org.apache.hadoop.ozone.container.common.volume.VolumeChoosingPolicyFactory;
import org.apache.hadoop.ozone.container.diskbalancer.DiskBalancerVolumeCalculation.VolumeFixedUsage;
import org.apache.hadoop.ozone.container.diskbalancer.policy.ContainerChoosingPolicy;
import org.apache.hadoop.ozone.container.diskbalancer.policy.DiskBalancerVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -632,54 +636,46 @@ private boolean tryCleanupOnePendingDeletionContainer() {
}

public DiskBalancerInfo getDiskBalancerInfo() {
ImmutableList<HddsVolume> immutableVolumeSet = DiskBalancerVolumeCalculation.getImmutableVolumeSet(volumeSet);
final List<VolumeFixedUsage> volumeUsages = getVolumeUsages(volumeSet, deltaSizes);

// Calculate volumeDataDensity
double volumeDatadensity = 0.0;
volumeDatadensity = DiskBalancerVolumeCalculation.calculateVolumeDataDensity(immutableVolumeSet, deltaSizes);
final double volumeDataDensity = calculateVolumeDataDensity(volumeUsages);

long bytesToMove = 0;
if (this.operationalState == DiskBalancerRunningStatus.RUNNING) {
// this calculates live changes in bytesToMove
// calculate bytes to move if the balancer is in a running state, else 0.
bytesToMove = calculateBytesToMove(immutableVolumeSet);
bytesToMove = calculateBytesToMove(volumeUsages);
}

return new DiskBalancerInfo(operationalState, threshold, bandwidthInMB,
parallelThread, stopAfterDiskEven, version, metrics.getSuccessCount(),
metrics.getFailureCount(), bytesToMove, metrics.getSuccessBytes(), volumeDatadensity);
metrics.getFailureCount(), bytesToMove, metrics.getSuccessBytes(), volumeDataDensity);
}

public long calculateBytesToMove(ImmutableList<HddsVolume> inputVolumeSet) {
public long calculateBytesToMove(List<VolumeFixedUsage> inputVolumeSet) {
// If there are no available volumes or only one volume, return 0 bytes to move
if (inputVolumeSet.isEmpty() || inputVolumeSet.size() < 2) {
return 0;
}

// Calculate ideal usage
double idealUsage = DiskBalancerVolumeCalculation.getIdealUsage(inputVolumeSet, deltaSizes);
double normalizedThreshold = threshold / 100.0;
// Calculate actual threshold
final double actualThreshold = getIdealUsage(inputVolumeSet) + threshold / 100.0;

long totalBytesToMove = 0;

// Calculate excess data in overused volumes
for (HddsVolume volume : inputVolumeSet) {
SpaceUsageSource usage = volume.getCurrentUsage();

for (VolumeFixedUsage volumeUsage : inputVolumeSet) {
final SpaceUsageSource.Fixed usage = volumeUsage.getUsage();
if (usage.getCapacity() == 0) {
continue;
}

long deltaSize = deltaSizes.getOrDefault(volume, 0L);
double currentUsage = (double)((usage.getCapacity() - usage.getAvailable())
+ deltaSize + volume.getCommittedBytes()) / usage.getCapacity();

double volumeUtilisation = currentUsage - idealUsage;

final double excess = volumeUsage.getUtilization() - actualThreshold;
// Only consider volumes that exceed the threshold (source volumes)
if (volumeUtilisation >= normalizedThreshold) {
if (excess > 0) {
// Calculate excess bytes that need to be moved from this volume
long excessBytes = (long) ((volumeUtilisation - normalizedThreshold) * usage.getCapacity());
final long excessBytes = (long) (excess * usage.getCapacity());
totalBytesToMove += Math.max(0, excessBytes);
}
}
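The rewritten calculateBytesToMove above compares each volume's utilization against idealUsage + threshold / 100 and sums excess * capacity over the volumes that exceed that mark. The following self-contained sketch repeats the same arithmetic on plain arrays with invented numbers and an illustrative class name; it does not use the PR's VolumeFixedUsage type.

public final class BytesToMoveSketch {
  private BytesToMoveSketch() {
  }

  public static void main(String[] args) {
    // Hypothetical per-volume figures: capacity and effective used
    // (used + committed + in-flight delta), all in bytes.
    final long[] capacity = {1000L, 1000L, 1000L};
    final long[] effectiveUsed = {900L, 500L, 100L};
    final double thresholdPercentage = 10.0; // configured threshold, in [0, 100]

    long totalCapacity = 0;
    long totalEffectiveUsed = 0;
    for (int i = 0; i < capacity.length; i++) {
      totalCapacity += capacity[i];
      totalEffectiveUsed += effectiveUsed[i];
    }

    // idealUsage = 1500 / 3000 = 0.5; actualThreshold = 0.5 + 0.1 = 0.6
    final double idealUsage = (double) totalEffectiveUsed / totalCapacity;
    final double actualThreshold = idealUsage + thresholdPercentage / 100.0;

    long totalBytesToMove = 0;
    for (int i = 0; i < capacity.length; i++) {
      final double utilization = (double) effectiveUsed[i] / capacity[i];
      final double excess = utilization - actualThreshold;
      if (excess > 0) {
        // Only volume 0 is over the mark: (0.9 - 0.6) * 1000 = 300 bytes.
        totalBytesToMove += Math.max(0, (long) (excess * capacity[i]));
      }
    }
    System.out.println("bytesToMove = " + totalBytesToMove); // prints 300
  }
}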
@@ -17,14 +17,18 @@

package org.apache.hadoop.ozone.container.diskbalancer;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import static org.apache.ratis.util.Preconditions.assertInstanceOf;
import static org.apache.ratis.util.Preconditions.assertTrue;

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;
import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -47,51 +51,42 @@ private DiskBalancerVolumeCalculation() {

/**
* Get an immutable snapshot of volumes from a MutableVolumeSet.
*
*
* @param volumeSet The MutableVolumeSet to create a snapshot from
* @return Immutable list of HddsVolume objects
* @return a list of volumes and usages
*/
public static ImmutableList<HddsVolume> getImmutableVolumeSet(MutableVolumeSet volumeSet) {
// Create an immutable copy of the volume list at this point in time
List<HddsVolume> volumes = StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
return ImmutableList.copyOf(volumes);
public static List<VolumeFixedUsage> getVolumeUsages(MutableVolumeSet volumeSet, Map<HddsVolume, Long> deltas) {
return volumeSet.getVolumesList().stream()
.map(v -> newVolumeFixedUsage(v, deltas))
.collect(Collectors.toList());
}

/**
* Get ideal usage from an immutable list of volumes.
*
* @param volumes Immutable list of volumes
* @param deltaMap A map that tracks the total bytes which will be freed
* from each source volume during container moves
* @return Ideal usage as a ratio (used space / total capacity)
* @throws IllegalArgumentException if total capacity is zero
*/
public static double getIdealUsage(ImmutableList<HddsVolume> volumes,
Map<HddsVolume, Long> deltaMap) {
public static double getIdealUsage(List<VolumeFixedUsage> volumes) {
long totalCapacity = 0L, totalEffectiveUsed = 0L;

for (HddsVolume volume : volumes) {
SpaceUsageSource usage = volume.getCurrentUsage();
totalCapacity += usage.getCapacity();
long currentUsed = usage.getCapacity() - usage.getAvailable();
long delta = (deltaMap != null) ? deltaMap.getOrDefault(volume, 0L) : 0L;
long committed = volume.getCommittedBytes();
totalEffectiveUsed += (currentUsed + delta + committed);
for (VolumeFixedUsage volumeUsage : volumes) {
totalCapacity += volumeUsage.getUsage().getCapacity();
totalEffectiveUsed += volumeUsage.getEffectiveUsed();
}

Preconditions.checkArgument(totalCapacity != 0);
return ((double) (totalEffectiveUsed)) / totalCapacity;
}

/**
* Calculate VolumeDataDensity.
*
* @param volumeSet The MutableVolumeSet containing all volumes
* @param deltaMap Map of volume to delta sizes (ongoing operations), can be null
* @return VolumeDataDensity sum across all volumes
*/
public static double calculateVolumeDataDensity(ImmutableList<HddsVolume> volumeSet,
Map<HddsVolume, Long> deltaMap) {
public static double calculateVolumeDataDensity(List<VolumeFixedUsage> volumeSet) {
if (volumeSet == null) {
LOG.warn("VolumeSet is null, returning 0.0 for VolumeDataDensity");
return 0.0;
@@ -104,18 +99,13 @@ public static double calculateVolumeDataDensity(ImmutableList<HddsVolume> volume
}

// Calculate ideal usage using the same immutable volume snapshot
double idealUsage = getIdealUsage(volumeSet, deltaMap);
final double idealUsage = getIdealUsage(volumeSet);
double volumeDensitySum = 0.0;

// Calculate density for each volume using the same snapshot
for (HddsVolume volume : volumeSet) {
SpaceUsageSource usage = volume.getCurrentUsage();
Preconditions.checkArgument(usage.getCapacity() != 0);

long deltaSize = (deltaMap != null) ? deltaMap.getOrDefault(volume, 0L) : 0L;
double currentUsage = (double)((usage.getCapacity() - usage.getAvailable())
+ deltaSize + volume.getCommittedBytes()) / usage.getCapacity();

for (VolumeFixedUsage volumeUsage : volumeSet) {
final double currentUsage = volumeUsage.getUtilization();

// Calculate density as absolute difference from ideal usage
double volumeDensity = Math.abs(currentUsage - idealUsage);
volumeDensitySum += volumeDensity;
@@ -126,4 +116,56 @@ public static double calculateVolumeDataDensity(ImmutableList<HddsVolume> volume
return -1.0;
}
}

public static double computeUtilization(SpaceUsageSource.Fixed usage, long committed, long required) {
final long capacity = usage.getCapacity();
assertTrue(capacity > 0, () -> "capacity = " + capacity + " <= 0");
return computeEffectiveUsage(usage, committed, required) / (double) capacity;
}

private static long computeEffectiveUsage(SpaceUsageSource.Fixed usage, long committed, long required) {
return usage.getCapacity() - usage.getAvailable() + committed + required;
}

public static VolumeFixedUsage newVolumeFixedUsage(StorageVolume volume, Map<HddsVolume, Long> deltaMap) {
final HddsVolume v = assertInstanceOf(volume, HddsVolume.class);
final long delta = deltaMap == null ? 0 : deltaMap.getOrDefault(v, 0L);
return new VolumeFixedUsage(v, delta);
}

/** {@link HddsVolume} with a {@link SpaceUsageSource.Fixed} usage. */
public static final class VolumeFixedUsage {
private final HddsVolume volume;
private final SpaceUsageSource.Fixed usage;
private final long effectiveUsed;
private final Double utilization;

private VolumeFixedUsage(HddsVolume volume, long delta) {
this.volume = volume;
this.usage = volume.getCurrentUsage();
this.effectiveUsed = computeEffectiveUsage(usage, volume.getCommittedBytes(), delta);
this.utilization = usage.getCapacity() > 0 ? computeUtilization(usage, volume.getCommittedBytes(), delta) : null;
}

public HddsVolume getVolume() {
return volume;
}

public SpaceUsageSource.Fixed getUsage() {
return usage;
}

public long getEffectiveUsed() {
return effectiveUsed;
}

public double getUtilization() {
return Objects.requireNonNull(utilization, "utilization == null");
}

public long computeUsableSpace() {
final long spared = volume.getFreeSpaceToSpare(usage.getCapacity());
return VolumeUsage.getUsableSpace(usage.getAvailable(), volume.getCommittedBytes(), spared);
}
}
}
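The class above derives a per-volume effective-used figure (capacity - available + committed + delta), turns it into a utilization ratio, and reports volume data density as the sum of |utilization - idealUsage| across volumes. The standalone sketch below works those formulas through with hypothetical numbers; VolumeDensitySketch and its values are illustrative only.

public final class VolumeDensitySketch {
  private VolumeDensitySketch() {
  }

  public static void main(String[] args) {
    // Hypothetical volumes as {capacity, available, committed, delta}, in bytes.
    final long[][] volumes = {
        {1000L, 300L, 100L, 0L},  // effective used = 1000 - 300 + 100 + 0 = 800
        {1000L, 800L, 0L, 100L},  // effective used = 1000 - 800 + 0 + 100 = 300
    };

    long totalCapacity = 0;
    long totalEffectiveUsed = 0;
    final double[] utilization = new double[volumes.length];
    for (int i = 0; i < volumes.length; i++) {
      final long capacity = volumes[i][0];
      final long effectiveUsed = capacity - volumes[i][1] + volumes[i][2] + volumes[i][3];
      utilization[i] = (double) effectiveUsed / capacity; // 0.8 and 0.3
      totalCapacity += capacity;
      totalEffectiveUsed += effectiveUsed;
    }

    // idealUsage = 1100 / 2000 = 0.55
    final double idealUsage = (double) totalEffectiveUsed / totalCapacity;

    double volumeDataDensity = 0.0;
    for (final double u : utilization) {
      volumeDataDensity += Math.abs(u - idealUsage); // 0.25 + 0.25
    }
    System.out.println("volumeDataDensity = " + volumeDataDensity); // ~0.5
  }
}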
@@ -36,14 +36,14 @@ public interface ContainerChoosingPolicy {
* @param destVolume the destination volume to which container is being moved.
* @param inProgressContainerIDs containerIDs present in this set should be
- avoided as these containers are already under move by diskBalancer.
* @param threshold the threshold value
* @param thresholdPercentage the threshold percentage in range [0, 100]
* @param volumeSet the volumeSet instance
* @param deltaMap the deltaMap instance of source volume
* @return a Container
*/
ContainerData chooseContainer(OzoneContainer ozoneContainer,
HddsVolume srcVolume, HddsVolume destVolume,
Set<ContainerID> inProgressContainerIDs,
Double threshold, MutableVolumeSet volumeSet,
double thresholdPercentage, MutableVolumeSet volumeSet,
Map<HddsVolume, Long> deltaMap);
}