diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index dbb913668833..7ffb02eb42ea 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -639,6 +639,17 @@ public final class OzoneConfigKeys {
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT =
TimeUnit.HOURS.toMillis(1);
+ public static final String
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE =
+ "ozone.om.snapshot.prune.compaction.backup.batch.size";
+
+ public static final int
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT = 2000;
+
+ public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB =
+ "ozone.om.snapshot.load.native.lib";
+ public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true;
+
public static final String OZONE_OM_DELTA_UPDATE_DATA_SIZE_MAX_LIMIT =
"ozone.om.delta.update.data.size.max.limit";
public static final String
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index b250771f815d..dbc02cb4ed1f 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -4335,6 +4335,16 @@
+  <property>
+    <name>ozone.om.snapshot.prune.compaction.backup.batch.size</name>
+    <value>2000</value>
+    <tag>OZONE, OM</tag>
+    <description>
+      Prune SST files in the compaction backup directory in batches of this
+      size on each run of ozone.om.snapshot.compaction.dag.prune.daemon.run.interval.
+    </description>
+  </property>
+
   <property>
     <name>ozone.om.snapshot.compaction.dag.prune.daemon.run.interval</name>
     <value>3600s</value>
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 45dd22e49fe1..c1e3fe034f26 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -515,6 +515,7 @@ message CompactionFileInfoProto {
optional string startKey = 2;
optional string endKey = 3;
optional string columnFamily = 4;
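+ // Set to true once the values in the corresponding backup SST file have been pruned.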
+ optional bool pruned = 5;
}
message CompactionLogEntryProto {
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java
index 5ad8513cfb40..46189600bb44 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionFileInfo.java
@@ -32,16 +32,26 @@ public final class CompactionFileInfo {
private final String startKey;
private final String endKey;
private final String columnFamily;
+ private boolean pruned;
@VisibleForTesting
public CompactionFileInfo(String fileName,
String startRange,
String endRange,
String columnFamily) {
+ this(fileName, startRange, endRange, columnFamily, false);
+ }
+
+ public CompactionFileInfo(String fileName,
+ String startRange,
+ String endRange,
+ String columnFamily,
+ boolean pruned) {
this.fileName = fileName;
this.startKey = startRange;
this.endKey = endRange;
this.columnFamily = columnFamily;
+ this.pruned = pruned;
}
public String getFileName() {
@@ -60,10 +70,19 @@ public String getColumnFamily() {
return columnFamily;
}
+ public boolean isPruned() {
+ return pruned;
+ }
+
+ public void setPruned() {
+ this.pruned = true;
+ }
+
public HddsProtos.CompactionFileInfoProto getProtobuf() {
HddsProtos.CompactionFileInfoProto.Builder builder =
HddsProtos.CompactionFileInfoProto.newBuilder()
- .setFileName(fileName);
+ .setFileName(fileName)
+ .setPruned(pruned);
if (startKey != null) {
builder = builder.setStartKey(startKey);
}
@@ -89,6 +108,9 @@ public static CompactionFileInfo getFromProtobuf(
if (proto.hasColumnFamily()) {
builder.setColumnFamily(proto.getColumnFamily());
}
+ if (proto.hasPruned() && proto.getPruned()) {
+ builder.setPruned();
+ }
return builder.build();
}
@@ -96,7 +118,7 @@ public static CompactionFileInfo getFromProtobuf(
@Override
public String toString() {
return String.format("fileName: '%s', startKey: '%s', endKey: '%s'," +
- " columnFamily: '%s'", fileName, startKey, endKey, columnFamily);
+ " columnFamily: '%s', isPruned: '%b'", fileName, startKey, endKey, columnFamily, pruned);
}
/**
@@ -107,6 +129,7 @@ public static class Builder {
private String startRange;
private String endRange;
private String columnFamily;
+ private boolean pruned = false;
public Builder(String fileName) {
Preconditions.checkNotNull(fileName, "FileName is required parameter.");
@@ -138,6 +161,11 @@ public Builder setValues(LiveFileMetaData fileMetaData) {
return this;
}
+ public Builder setPruned() {
+ this.pruned = true;
+ return this;
+ }
+
public CompactionFileInfo build() {
if ((startRange != null || endRange != null || columnFamily != null) &&
(startRange == null || endRange == null || columnFamily == null)) {
@@ -149,7 +177,7 @@ public CompactionFileInfo build() {
}
return new CompactionFileInfo(fileName, startRange, endRange,
- columnFamily);
+ columnFamily, pruned);
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java
index 8ee48e43f903..85759e429210 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/CompactionLogEntry.java
@@ -129,13 +129,23 @@ public String toString() {
inputFileInfoList, outputFileInfoList, compactionReason);
}
+ public Builder toBuilder() {
+ Builder builder = new Builder(this.getDbSequenceNumber(), this.getCompactionTime(),
+ this.getInputFileInfoList(), this.getOutputFileInfoList());
+ String reason = this.getCompactionReason();
+ if (reason != null) {
+ builder.setCompactionReason(reason);
+ }
+ return builder;
+ }
+
/**
* Builder of CompactionLogEntry.
*/
public static class Builder {
private final long dbSequenceNumber;
private final long compactionTime;
- private final List<CompactionFileInfo> inputFileInfoList;
+ private List<CompactionFileInfo> inputFileInfoList;
private final List<CompactionFileInfo> outputFileInfoList;
private String compactionReason;
@@ -157,6 +167,11 @@ public Builder setCompactionReason(String compactionReason) {
return this;
}
+ public Builder updateInputFileInfoList(List<CompactionFileInfo> fileInfoList) {
+ this.inputFileInfoList = fileInfoList;
+ return this;
+ }
+
public CompactionLogEntry build() {
return new CompactionLogEntry(dbSequenceNumber, compactionTime,
inputFileInfoList, outputFileInfoList, compactionReason);
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 8f288ddc4587..583a3bb7bc1f 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -18,10 +18,16 @@
package org.apache.ozone.rocksdiff;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -36,6 +42,7 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
@@ -46,8 +53,10 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -60,10 +69,17 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
import org.apache.hadoop.hdds.utils.Scheduler;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.compaction.log.CompactionLogEntry;
@@ -138,6 +154,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
static final String SST_FILE_EXTENSION = ".sst";
public static final int SST_FILE_EXTENSION_LENGTH =
SST_FILE_EXTENSION.length();
+ static final String PRUNED_SST_FILE_TEMP = "pruned.sst.tmp";
private static final int LONG_MAX_STR_LEN =
String.valueOf(Long.MAX_VALUE).length();
@@ -153,7 +170,8 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
private final long maxAllowedTimeInDag;
private final BootstrapStateHandler.Lock lock
= new BootstrapStateHandler.Lock();
-
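+ // 2 MB read-ahead used when scanning backup SST files with the raw SST reader.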
+ private static final int SST_READ_AHEAD_SIZE = 2 * 1024 * 1024;
+ private int pruneSSTFileBatchSize;
private ColumnFamilyHandle snapshotInfoTableCFHandle;
private static final String DAG_PRUNING_SERVICE_NAME = "CompactionDagPruningService";
private AtomicBoolean suspended;
@@ -161,6 +179,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
private ColumnFamilyHandle compactionLogTableCFHandle;
private ManagedRocksDB activeRocksDB;
private final ConcurrentMap inflightCompactions;
+ private Queue<byte[]> pruneQueue = null;
/**
* For snapshot diff calculation we only need to track following column
@@ -217,6 +236,19 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
+ this.pruneSSTFileBatchSize = configuration.getInt(
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE,
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT);
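+ // Value pruning relies on the native raw SST reader; the prune queue is only created when the library loads.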
+ try {
+ if (configuration.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)
+ && ManagedRawSSTFileReader.loadLibrary()) {
+ pruneQueue = new ConcurrentLinkedQueue<>();
+ }
+ } catch (NativeLibraryNotLoadedException e) {
+ LOG.warn("Native Library for raw sst file reading loading failed." +
+ " Cannot prune OMKeyInfo from SST files. {}", e.getMessage());
+ }
+
if (pruneCompactionDagDaemonRunIntervalInMs > 0) {
this.scheduler = new Scheduler(DAG_PRUNING_SERVICE_NAME,
true, 1);
@@ -231,8 +263,15 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
this::pruneSstFiles,
pruneCompactionDagDaemonRunIntervalInMs,
pruneCompactionDagDaemonRunIntervalInMs,
- TimeUnit.MILLISECONDS
- );
+ TimeUnit.MILLISECONDS);
+
+ if (pruneQueue != null) {
+ this.scheduler.scheduleWithFixedDelay(
+ this::pruneSstFileValues,
+ pruneCompactionDagDaemonRunIntervalInMs,
+ pruneCompactionDagDaemonRunIntervalInMs,
+ TimeUnit.MILLISECONDS);
+ }
} else {
this.scheduler = null;
}
@@ -466,6 +505,7 @@ public void onCompactionCompleted(RocksDB db,
}
CompactionLogEntry compactionLogEntry = builder.build();
+ byte[] key;
synchronized (this) {
if (closed) {
return;
@@ -478,7 +518,7 @@ public void onCompactionCompleted(RocksDB db,
}
// Add the compaction log entry to Compaction log table.
- addToCompactionLogTable(compactionLogEntry);
+ key = addToCompactionLogTable(compactionLogEntry);
// Populate the DAG
compactionDag.populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
@@ -488,12 +528,17 @@ public void onCompactionCompleted(RocksDB db,
inflightCompactions.remove(inputFile);
}
}
+ // Add the compaction log entry to the prune queue
+ // so that the backup input sst files can be pruned.
+ if (pruneQueue != null) {
+ pruneQueue.offer(key);
+ }
}
};
}
@VisibleForTesting
- void addToCompactionLogTable(CompactionLogEntry compactionLogEntry) {
+ byte[] addToCompactionLogTable(CompactionLogEntry compactionLogEntry) {
String dbSequenceIdStr =
String.valueOf(compactionLogEntry.getDbSequenceNumber());
@@ -518,6 +563,7 @@ void addToCompactionLogTable(CompactionLogEntry compactionLogEntry) {
// TODO: Revisit exception handling before merging the PR.
throw new RuntimeException(exception);
}
+ return key;
}
/**
@@ -697,6 +743,10 @@ private void loadCompactionDagFromDB() {
CompactionLogEntry.getFromProtobuf(CompactionLogEntryProto.parseFrom(value));
compactionDag.populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
compactionLogEntry.getOutputFileInfoList(), compactionLogEntry.getDbSequenceNumber());
+ // Add the compaction log entry to the prune queue so that the backup input sst files can be pruned.
+ if (pruneQueue != null) {
+ pruneQueue.offer(managedRocksIterator.get().key());
+ }
managedRocksIterator.get().next();
}
} catch (InvalidProtocolBufferException e) {
@@ -1200,6 +1250,107 @@ public void pruneSstFiles() {
}
}
+ /**
+ * Defines the task that removes OMKeyInfo values from SST files in the backup
+ * directory to save disk space.
+ */
+ public void pruneSstFileValues() {
+ if (!shouldRun()) {
+ return;
+ }
+
+ Path sstBackupDirPath = Paths.get(sstBackupDir);
+ Path prunedSSTFilePath = sstBackupDirPath.resolve(PRUNED_SST_FILE_TEMP);
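+ // A single temp file (pruned.sst.tmp) in the backup directory is reused for every file pruned in this run.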
+ try (ManagedOptions managedOptions = new ManagedOptions();
+ ManagedEnvOptions envOptions = new ManagedEnvOptions()) {
+ byte[] compactionLogEntryKey;
+ int batchCounter = 0;
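+ // Process at most pruneSSTFileBatchSize compaction log entries per scheduled run.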
+ while ((compactionLogEntryKey = pruneQueue.peek()) != null && ++batchCounter <= pruneSSTFileBatchSize) {
+ CompactionLogEntry compactionLogEntry;
+ // Get the compaction log entry.
+ synchronized (this) {
+ try {
+ compactionLogEntry = CompactionLogEntry.getCodec().fromPersistedFormat(
+ activeRocksDB.get().get(compactionLogTableCFHandle, compactionLogEntryKey));
+ } catch (RocksDBException ex) {
+ throw new RocksDatabaseException("Failed to get compaction log entry.", ex);
+ }
+
+ boolean shouldUpdateTable = false;
+ List<CompactionFileInfo> fileInfoList = compactionLogEntry.getInputFileInfoList();
+ List<CompactionFileInfo> updatedFileInfoList = new ArrayList<>();
+ for (CompactionFileInfo fileInfo : fileInfoList) {
+ // Skip the file if it has already been pruned or has been removed.
+ if (fileInfo.isPruned()) {
+ updatedFileInfoList.add(fileInfo);
+ continue;
+ }
+ Path sstFilePath = sstBackupDirPath.resolve(fileInfo.getFileName() + ROCKSDB_SST_SUFFIX);
+ if (Files.notExists(sstFilePath)) {
+ LOG.debug("Skipping pruning SST file {} as it does not exist in backup directory.", sstFilePath);
+ updatedFileInfoList.add(fileInfo);
+ continue;
+ }
+
+ // Prune file.sst => pruned.sst.tmp
+ Files.deleteIfExists(prunedSSTFilePath);
+ removeValueFromSSTFile(managedOptions, envOptions, sstFilePath.toFile().getAbsolutePath(),
+ prunedSSTFilePath.toFile().getAbsolutePath());
+
+ // Move pruned.sst.tmp => file.sst and replace existing file atomically.
+ try (BootstrapStateHandler.Lock lock = getBootstrapStateLock().lock()) {
+ Files.move(prunedSSTFilePath, sstFilePath,
+ StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
+ }
+ shouldUpdateTable = true;
+ fileInfo.setPruned();
+ updatedFileInfoList.add(fileInfo);
+ LOG.debug("Completed pruning OMKeyInfo from {}", sstFilePath);
+ }
+
+ // Update compaction log entry in table.
+ if (shouldUpdateTable) {
+ CompactionLogEntry.Builder builder = compactionLogEntry.toBuilder();
+ builder.updateInputFileInfoList(updatedFileInfoList);
+ try {
+ activeRocksDB.get().put(compactionLogTableCFHandle, compactionLogEntryKey,
+ builder.build().getProtobuf().toByteArray());
+ } catch (RocksDBException ex) {
+ throw new RocksDatabaseException("Failed to update the compaction log table for entry: "
+ + compactionLogEntry, ex);
+ }
+ }
+ }
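+ // Dequeue the entry only after all of its input files have been processed.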
+ pruneQueue.poll();
+ }
+ } catch (IOException | InterruptedException e) {
+ LOG.error("Could not prune source OMKeyInfo from backup SST files.", e);
+ }
+ }
+
+ private void removeValueFromSSTFile(ManagedOptions options, ManagedEnvOptions envOptions,
+ String sstFilePath, String prunedFilePath)
+ throws IOException {
+ try (ManagedRawSSTFileReader<Pair<byte[], Integer>> sstFileReader = new ManagedRawSSTFileReader<>(
+ options, sstFilePath, SST_READ_AHEAD_SIZE);
+ ManagedRawSSTFileIterator<Pair<byte[], Integer>> itr = sstFileReader.newIterator(
+ keyValue -> Pair.of(keyValue.getKey(), keyValue.getType()), null, null);
+ ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, options)) {
+ sstFileWriter.open(prunedFilePath);
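+ // Rewrite every entry: tombstones (type 0) are kept as deletes, other entries are written with empty values.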
+ while (itr.hasNext()) {
+ Pair<byte[], Integer> keyValue = itr.next();
+ if (keyValue.getValue() == 0) {
+ sstFileWriter.delete(keyValue.getKey());
+ } else {
+ sstFileWriter.put(keyValue.getKey(), EMPTY_BYTE_ARRAY);
+ }
+ }
+ sstFileWriter.finish();
+ } catch (RocksDBException ex) {
+ throw new RocksDatabaseException("Failed to write pruned entries for " + sstFilePath, ex);
+ }
+ }
+
public boolean shouldRun() {
return !suspended.get();
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestCompactionFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestCompactionFileInfo.java
index 88311173e3ed..5178bfda7efe 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestCompactionFileInfo.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/compaction/log/TestCompactionFileInfo.java
@@ -21,6 +21,7 @@
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.stream.Stream;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionFileInfoProto;
@@ -41,6 +42,12 @@ private static Stream compactionFileInfoValidScenarios() {
"endRange",
"columnFamily"
),
+ Arguments.of("Only fileName is present.",
+ "fileName",
+ null,
+ null,
+ null
+ ),
Arguments.of("Only fileName is present.",
"fileName",
null,
@@ -58,10 +65,15 @@ public void testCompactionFileInfoValidScenario(String description,
String endRange,
String columnFamily) {
- CompactionFileInfo compactionFileInfo =
- new CompactionFileInfo.Builder(fileName).setStartRange(startRange)
- .setEndRange(endRange).setColumnFamily(columnFamily).build();
+ CompactionFileInfo.Builder builder = new CompactionFileInfo.Builder(fileName).setStartRange(startRange)
+ .setEndRange(endRange).setColumnFamily(columnFamily);
+ CompactionFileInfo compactionFileInfo = builder.build();
assertNotNull(compactionFileInfo);
+ CompactionFileInfo prunedCompactionFileInfo = builder.setPruned().build();
+ assertFalse(compactionFileInfo.isPruned());
+ compactionFileInfo.setPruned();
+ assertTrue(compactionFileInfo.isPruned());
+ assertTrue(prunedCompactionFileInfo.isPruned());
}
private static Stream compactionFileInfoInvalidScenarios() {
@@ -215,5 +227,16 @@ public void testFromProtobuf(String description,
assertEquals(startRange, compactionFileInfo.getStartKey());
assertEquals(endRange, compactionFileInfo.getEndKey());
assertEquals(columnFamily, compactionFileInfo.getColumnFamily());
+ assertFalse(compactionFileInfo.isPruned());
+
+ CompactionFileInfoProto unPrunedProtobuf = builder.setPruned(false).build();
+ CompactionFileInfo unPrunedCompactionFileInfo =
+ CompactionFileInfo.getFromProtobuf(unPrunedProtobuf);
+ assertFalse(unPrunedCompactionFileInfo.isPruned());
+
+ CompactionFileInfoProto prunedProtobuf = builder.setPruned(true).build();
+ CompactionFileInfo prunedCompactionFileInfo =
+ CompactionFileInfo.getFromProtobuf(prunedProtobuf);
+ assertTrue(prunedCompactionFileInfo.isPruned());
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index b6b05a02cc02..1a329647cc9f 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -24,10 +24,15 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT;
import static org.apache.hadoop.util.Time.now;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_FILE_NAME_SUFFIX;
+import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.PRUNED_SST_FILE_TEMP;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -37,6 +42,7 @@
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -57,6 +63,7 @@
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -75,17 +82,25 @@
import java.util.stream.Stream;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedFlushOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReaderIterator;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.hadoop.util.Time;
import org.apache.ozone.compaction.log.CompactionFileInfo;
@@ -100,6 +115,7 @@
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
+import org.mockito.MockedConstruction;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.rocksdb.ColumnFamilyDescriptor;
@@ -350,11 +366,22 @@ public void init() throws RocksDBException {
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS)).thenReturn(0L);
- rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME,
- SST_BACK_UP_DIR_NAME,
- COMPACTION_LOG_DIR_NAME,
- ACTIVE_DB_DIR_NAME,
- config);
+ when(config.getInt(
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE,
+ OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_BACKUP_BATCH_SIZE_DEFAULT)).thenReturn(2000);
+
+ when(config.getBoolean(
+ OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB,
+ OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)).thenReturn(true);
+
+ try (MockedStatic<ManagedRawSSTFileReader> mockedRawSSTReader = Mockito.mockStatic(ManagedRawSSTFileReader.class)) {
+ mockedRawSSTReader.when(ManagedRawSSTFileReader::loadLibrary).thenReturn(true);
+ rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME,
+ SST_BACK_UP_DIR_NAME,
+ COMPACTION_LOG_DIR_NAME,
+ ACTIVE_DB_DIR_NAME,
+ config);
+ }
ManagedColumnFamilyOptions cfOpts = new ManagedColumnFamilyOptions();
cfOpts.optimizeUniversalStyleCompaction();
@@ -1902,6 +1929,109 @@ private static Stream casesGetSSTDiffListWithoutDB2() {
);
}
+
+ /**
+ * Test that backup SST files are pruned on loading previous compaction logs.
+ */
+ @Test
+ public void testPruneSSTFileValues() throws Exception {
+
+ List<Pair<byte[], Integer>> keys = new ArrayList<>();
+ keys.add(Pair.of("key1".getBytes(UTF_8), Integer.valueOf(1)));
+ keys.add(Pair.of("key2".getBytes(UTF_8), Integer.valueOf(0)));
+ keys.add(Pair.of("key3".getBytes(UTF_8), Integer.valueOf(1)));
+
+ String inputFile78 = "000078";
+ String inputFile73 = "000073";
+ String outputFile81 = "000081";
+ // Create source & destination files in the backup & active directories.
+ // Pruning job should succeed when pruned temp file is already present.
+ createSSTFileWithKeys(sstBackUpDir + "/" + inputFile78 + SST_FILE_EXTENSION, keys);
+ createSSTFileWithKeys(sstBackUpDir + "/" + inputFile73 + SST_FILE_EXTENSION, keys);
+ createSSTFileWithKeys(sstBackUpDir + PRUNED_SST_FILE_TEMP, keys);
+ createSSTFileWithKeys(activeDbDir + "/" + outputFile81 + SST_FILE_EXTENSION, keys);
+
+ // Load compaction log
+ CompactionLogEntry compactionLogEntry = new CompactionLogEntry(178, System.currentTimeMillis(),
+ Arrays.asList(
+ new CompactionFileInfo(inputFile78, "/volume/bucket1/key-5", "/volume/bucket2/key-10", "keyTable"),
+ new CompactionFileInfo(inputFile73, "/volume/bucket1/key-1", "/volume/bucket2/key-5", "keyTable")),
+ Collections.singletonList(
+ new CompactionFileInfo(outputFile81, "/volume/bucket1/key-1", "/volume/bucket2/key-10", "keyTable")),
+ null
+ );
+ byte[] compactionLogEntryKey = rocksDBCheckpointDiffer.addToCompactionLogTable(compactionLogEntry);
+ rocksDBCheckpointDiffer.loadAllCompactionLogs();
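+ // Loading the compaction logs re-populates the prune queue from the compaction log table.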
+
+ // Pruning should not fail if a source SST file has been removed by another pruner.
+ Files.delete(sstBackUpDir.toPath().resolve(inputFile73 + SST_FILE_EXTENSION));
+ // Run the SST file pruner.
+ ManagedRawSSTFileIterator mockedRawSSTFileItr = mock(ManagedRawSSTFileIterator.class);
+ Iterator keyItr = keys.iterator();
+ when(mockedRawSSTFileItr.hasNext()).thenReturn(true, true, true, false);
+ when(mockedRawSSTFileItr.next()).thenReturn(keyItr.next(), keyItr.next(), keyItr.next());
+ try (MockedConstruction<ManagedRawSSTFileReader> mockedRawSSTReader = Mockito.mockConstruction(
+ ManagedRawSSTFileReader.class, (mock, context) -> {
+ when(mock.newIterator(any(), any(), any())).thenReturn(mockedRawSSTFileItr);
+ doNothing().when(mock).close();
+ })) {
+ rocksDBCheckpointDiffer.pruneSstFileValues();
+ }
+ // pruned.sst.tmp should be deleted when pruning job exits successfully.
+ assertFalse(Files.exists(sstBackUpDir.toPath().resolve(PRUNED_SST_FILE_TEMP)));
+
+ CompactionLogEntry updatedLogEntry;
+ try {
+ updatedLogEntry = CompactionLogEntry.getCodec().fromPersistedFormat(
+ activeRocksDB.get().get(compactionLogTableCFHandle, compactionLogEntryKey));
+ } catch (RocksDBException ex) {
+ throw new RocksDatabaseException("Failed to get compaction log entry.", ex);
+ }
+ CompactionFileInfo fileInfo78 = updatedLogEntry.getInputFileInfoList().get(0);
+ CompactionFileInfo fileInfo73 = updatedLogEntry.getInputFileInfoList().get(1);
+
+ // Verify 000078.sst has been pruned
+ assertEquals(inputFile78, fileInfo78.getFileName());
+ assertTrue(fileInfo78.isPruned());
+ ManagedSstFileReader sstFileReader = new ManagedSstFileReader(new ManagedOptions());
+ sstFileReader.open(sstBackUpDir.toPath().resolve(inputFile78 + SST_FILE_EXTENSION).toFile().getAbsolutePath());
+ ManagedSstFileReaderIterator itr = ManagedSstFileReaderIterator
+ .managed(sstFileReader.newIterator(new ManagedReadOptions()));
+ itr.get().seekToFirst();
+ int prunedKeys = 0;
+ while (itr.get().isValid()) {
+ // Verify that value is removed for non-tombstone keys.
+ assertEquals(0, itr.get().value().length);
+ prunedKeys++;
+ itr.get().next();
+ }
+ assertEquals(2, prunedKeys);
+ itr.close();
+ sstFileReader.close();
+
+ // Verify 000073.sst pruning has been skipped
+ assertFalse(fileInfo73.isPruned());
+ }
+
+ private void createSSTFileWithKeys(String filePath, List<Pair<byte[], Integer>> keys)
+ throws Exception {
+ try (ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(new ManagedEnvOptions(), new ManagedOptions())) {
+ sstFileWriter.open(filePath);
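+ // Entries with value 0 are written as tombstones; the rest get a dummy value for the pruner to strip.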
+ Iterator<Pair<byte[], Integer>> itr = keys.iterator();
+ while (itr.hasNext()) {
+ Pair<byte[], Integer> entry = itr.next();
+ if (entry.getValue() == 0) {
+ sstFileWriter.delete(entry.getKey());
+ } else {
+ sstFileWriter.put(entry.getKey(), "dummyValue".getBytes(UTF_8));
+ }
+ }
+ sstFileWriter.finish();
+ } catch (RocksDBException ex) {
+ throw new RocksDatabaseException("Failed to get write " + filePath, ex);
+ }
+ }
+
/**
* Tests core SST diff list logic. Does not involve DB.
* Focuses on testing edge cases in internalGetSSTDiffList().
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 89b6e2a10312..782f21b26378 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -27,10 +27,6 @@
* Ozone Manager Constants.
*/
public final class OMConfigKeys {
- public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB =
- "ozone.om.snapshot.load.native.lib";
- public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true;
-
public static final String OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY =
"ozone.filesystem.snapshot.enabled";
public static final boolean OZONE_FILESYSTEM_SNAPSHOT_ENABLED_DEFAULT = true;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 64f75945fbe9..c7069d8ee467 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -22,7 +22,9 @@
import static org.apache.commons.lang3.StringUtils.leftPad;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConsts.COMPACTION_LOG_TABLE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
@@ -93,11 +95,16 @@
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.CompactionLogEntryProto;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBStore;
+import org.apache.hadoop.hdds.utils.db.RocksDatabase;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
@@ -139,7 +146,9 @@
import org.apache.hadoop.ozone.upgrade.UpgradeFinalization;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
+import org.apache.ozone.compaction.log.CompactionLogEntry;
import org.apache.ozone.rocksdiff.CompactionNode;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Slow;
import org.junit.jupiter.api.AfterAll;
@@ -218,6 +227,9 @@ private void init() throws Exception {
conf.setInt(OMStorage.TESTING_INIT_LAYOUT_VERSION_KEY, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion());
conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE);
+ if (!disableNativeDiff) {
+ conf.setTimeDuration(OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL, 0, TimeUnit.SECONDS);
+ }
cluster = MiniOzoneCluster.newBuilder(conf)
.build();
@@ -2479,6 +2491,44 @@ public void testSnapshotCompactionDag() throws Exception {
assertEquals(200,
fetchReportPage(volume1, bucket3, "bucket3-snap1", "bucket3-snap3",
null, 0).getDiffList().size());
+
+ if (!disableNativeDiff) {
+ // Prune SST files in compaction backup directory.
+ RocksDatabase db = getRdbStore().getDb();
+ RocksDBCheckpointDiffer differ = getRdbStore().getRocksDBCheckpointDiffer();
+ differ.pruneSstFileValues();
+
+ // Verify backup SST files are pruned on DB compactions.
+ java.nio.file.Path sstBackUpDir = java.nio.file.Paths.get(differ.getSSTBackupDir());
+ try (ManagedOptions managedOptions = new ManagedOptions();
+ ManagedRocksIterator managedRocksIterator = new ManagedRocksIterator(
+ db.getManagedRocksDb().get().newIterator(db.getColumnFamily(COMPACTION_LOG_TABLE).getHandle()))) {
+ managedRocksIterator.get().seekToFirst();
+ while (managedRocksIterator.get().isValid()) {
+ byte[] value = managedRocksIterator.get().value();
+ CompactionLogEntry compactionLogEntry = CompactionLogEntry.getFromProtobuf(
+ CompactionLogEntryProto.parseFrom(value));
+ compactionLogEntry.getInputFileInfoList().forEach(
+ f -> {
+ java.nio.file.Path file = sstBackUpDir.resolve(f.getFileName() + ".sst");
+ if (COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(f.getColumnFamily()) && java.nio.file.Files.exists(file)) {
+ assertTrue(f.isPruned());
+ try (ManagedRawSSTFileReader<byte[]> sstFileReader = new ManagedRawSSTFileReader<>(
+ managedOptions, file.toFile().getAbsolutePath(), 2 * 1024 * 1024);
+ ManagedRawSSTFileIterator<byte[]> itr = sstFileReader.newIterator(
+ keyValue -> keyValue.getValue(), null, null)) {
+ while (itr.hasNext()) {
+ assertEquals(0, itr.next().length);
+ }
+ }
+ } else {
+ assertFalse(f.isPruned());
+ }
+ });
+ managedRocksIterator.get().next();
+ }
+ }
+ }
}
@Test
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 511426885d85..a6572bd1411c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -22,6 +22,8 @@
import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.DELETE;
import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.MODIFY;
import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.RENAME;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS;
@@ -34,8 +36,6 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 6cfb21b2feec..47e8dcdfb6f5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.om.snapshot;
import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.DEFAULT_COLUMN_FAMILY_NAME;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT;
@@ -27,8 +29,6 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_JOB_TABLE_NAME;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_REPORT_TABLE_NAME;