From 6e241f65e1d1d2469f5e46b2cde7ecce44a6e0c0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 09:39:47 -0400 Subject: [PATCH 001/126] HDDS-13765. SnapshotLocalData yaml should also track snapshotId Change-Id: Iba47aeb21663dfa407ab71339cef02c0d74b49f2 --- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../hadoop/ozone/om/OmSnapshotLocalData.java | 11 +++++++++- .../ozone/om/OmSnapshotLocalDataYaml.java | 8 +++++--- .../hadoop/ozone/om/OmSnapshotManager.java | 4 ++-- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 20 +++++++++++++------ 5 files changed, 32 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index bb6eef205e44..c9064da1781c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -213,6 +213,7 @@ public final class OzoneConsts { public static final String OM_SLD_LAST_DEFRAG_TIME = "lastDefragTime"; public static final String OM_SLD_NEEDS_DEFRAG = "needsDefrag"; public static final String OM_SLD_VERSION_SST_FILE_INFO = "versionSstFileInfos"; + public static final String OM_SLD_SNAP_ID = "snapshotId"; public static final String OM_SLD_PREV_SNAP_ID = "previousSnapshotId"; public static final String OM_SLD_VERSION_META_SST_FILES = "sstFiles"; public static final String OM_SLD_VERSION_META_PREV_SNAP_VERSION = "previousSnapshotVersion"; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 7a351ba5c337..5f65fd4c0d08 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -41,6 +41,9 @@ */ public abstract class OmSnapshotLocalData { + // Unique identifier for the snapshot. This is used to identify the snapshot. + private UUID snapshotId; + // Version of the snapshot local data. 0 indicates not defragged snapshot. // defragged snapshots will have version > 0. private int version; @@ -70,7 +73,8 @@ public abstract class OmSnapshotLocalData { /** * Creates a OmSnapshotLocalData object with default values. 
*/ - public OmSnapshotLocalData(List<LiveFileMetaData> notDefraggedSSTFileList, UUID previousSnapshotId) { + public OmSnapshotLocalData(UUID snapshotId, List<LiveFileMetaData> notDefraggedSSTFileList, UUID previousSnapshotId) { + this.snapshotId = snapshotId; this.isSSTFiltered = false; this.lastDefragTime = 0L; this.needsDefrag = false; @@ -93,6 +97,7 @@ public OmSnapshotLocalData(OmSnapshotLocalData source) { this.needsDefrag = source.needsDefrag; this.checksum = source.checksum; this.version = source.version; + this.snapshotId = source.snapshotId; this.previousSnapshotId = source.previousSnapshotId; this.versionSstFileInfos = new LinkedHashMap<>(); setVersionSstFileInfos(source.versionSstFileInfos); @@ -167,6 +172,10 @@ public UUID getPreviousSnapshotId() { return previousSnapshotId; } + public UUID getSnapshotId() { + return snapshotId; + } + public void setPreviousSnapshotId(UUID previousSnapshotId) { this.previousSnapshotId = previousSnapshotId; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index 3a80915e6eac..1d4fedfacaaf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -70,8 +70,8 @@ public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { /** * Creates a new OmSnapshotLocalDataYaml with default values. */ - public OmSnapshotLocalDataYaml(List<LiveFileMetaData> liveFileMetaDatas, UUID previousSnapshotId) { - super(liveFileMetaDatas, previousSnapshotId); + public OmSnapshotLocalDataYaml(UUID snapshotId, List<LiveFileMetaData> liveFileMetaDatas, UUID previousSnapshotId) { + super(snapshotId, liveFileMetaDatas, previousSnapshotId); } /** @@ -227,8 +227,10 @@ private final class ConstructSnapshotLocalData extends AbstractConstruct { public Object construct(Node node) { MappingNode mnode = (MappingNode) node; Map nodes = constructMapping(mnode); + UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); - OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(Collections.emptyList(), prevSnapId); + OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(snapId, Collections.emptyList(), + prevSnapId); // Set version from YAML Integer version = (Integer) nodes.get(OzoneConsts.OM_SLD_VERSION); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index d86b1ce6473f..d531f95c46b6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -645,8 +645,8 @@ public static void createNewOmSnapshotLocalDataFile(OmSnapshotManager snapshotMa SnapshotInfo snapshotInfo) throws IOException { Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(getSnapshotSSTFileList(snapshotStore), - snapshotInfo.getPathPreviousSnapshotId()); + OmSnapshotLocalDataYaml snapshotLocalDataYaml = new
OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); snapshotLocalDataYaml.writeToYaml(snapshotManager, snapshotLocalDataPath.toFile()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index eda95dc7b31d..8b41e5072185 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -104,7 +104,7 @@ private LiveFileMetaData createLiveFileMetaData(String fileName, String table, S /** * Creates a snapshot local data YAML file. */ - private Pair writeToYaml(String snapshotName) throws IOException { + private Pair writeToYaml(UUID snapshotId, String snapshotName) throws IOException { String yamlFilePath = snapshotName + ".yaml"; UUID previousSnapshotId = UUID.randomUUID(); // Create snapshot data with not defragged SST files @@ -112,7 +112,8 @@ private Pair writeToYaml(String snapshotName) throws IOException { createLiveFileMetaData("sst1", "table1", "k1", "k2"), createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); - OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(notDefraggedSSTFileList, previousSnapshotId); + OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(snapshotId, notDefraggedSSTFileList, + previousSnapshotId); // Set version dataYaml.setVersion(42); @@ -146,7 +147,8 @@ private Pair writeToYaml(String snapshotName) throws IOException { @Test public void testWriteToYaml() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot1"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1"); File yamlFile = yamlFilePrevIdPair.getLeft(); UUID prevSnapId = yamlFilePrevIdPair.getRight(); @@ -172,6 +174,7 @@ public void testWriteToYaml() throws IOException { assertEquals(2, defraggedSSTFiles.get(43).getSstFiles().size()); assertEquals(1, defraggedSSTFiles.get(44).getSstFiles().size()); assertEquals(prevSnapId, snapshotData.getPreviousSnapshotId()); + assertEquals(snapshotId, snapshotData.getSnapshotId()); assertEquals(ImmutableMap.of( 0, new VersionMeta(0, ImmutableList.of(new SstFileInfo("sst1", "k1", "k2", "table1"), @@ -186,7 +189,8 @@ public void testWriteToYaml() throws IOException { @Test public void testUpdateSnapshotDataFile() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot2"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalDataYaml dataYaml = @@ -228,7 +232,8 @@ public void testEmptyFile() throws IOException { @Test public void testChecksum() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot3"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); @@ -244,7 +249,8 @@ public void testChecksum() throws IOException { @Test public void testYamlContainsAllFields() throws IOException { - Pair yamlFilePrevIdPair = 
writeToYaml("snapshot4"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4"); File yamlFile = yamlFilePrevIdPair.getLeft(); String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); @@ -255,5 +261,7 @@ public void testYamlContainsAllFields() throws IOException { assertThat(content).contains(OzoneConsts.OM_SLD_LAST_DEFRAG_TIME); assertThat(content).contains(OzoneConsts.OM_SLD_NEEDS_DEFRAG); assertThat(content).contains(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO); + assertThat(content).contains(OzoneConsts.OM_SLD_SNAP_ID); + assertThat(content).contains(OzoneConsts.OM_SLD_PREV_SNAP_ID); } } From a8695001fdbc60e5dfad0bb1f9ec1526e06c9db9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 11:21:27 -0400 Subject: [PATCH 002/126] HDDS-13627. In memory Manager for Snapshot Local Data Change-Id: Ifd2feca1fddb144e4955db025f0b15a2ab1f3bfe --- .../OMDBCheckpointServletInodeBasedXfer.java | 3 +- .../ozone/om/OmSnapshotLocalDataYaml.java | 24 +-- .../hadoop/ozone/om/OmSnapshotManager.java | 75 ++------ .../snapshot/OMSnapshotPurgeResponse.java | 13 +- .../snapshot/OmSnapshotLocalDataManager.java | 160 ++++++++++++++++++ .../ozone/om/TestOmSnapshotLocalDataYaml.java | 23 +-- .../ozone/om/TestOmSnapshotManager.java | 15 +- ...TestOMSnapshotPurgeRequestAndResponse.java | 6 +- .../TestOMSnapshotCreateResponse.java | 5 +- .../TestOMSnapshotDeleteResponse.java | 5 +- 10 files changed, 225 insertions(+), 104 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 1acd9593c822..8a58ed6aa764 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -67,6 +67,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; @@ -298,7 +299,7 @@ private void transferSnapshotData(Set sstFilesToExclude, Path tmpdir, Se writeDBToArchive(sstFilesToExclude, snapshotDir, maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap, false); Path snapshotLocalPropertyYaml = Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(snapshotDir)); + OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotDir)); if (Files.exists(snapshotLocalPropertyYaml)) { File yamlFile = snapshotLocalPropertyYaml.toFile(); hardLinkFileMap.put(yamlFile.getAbsolutePath(), yamlFile.getName()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index 1d4fedfacaaf..a3683e11c16f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -31,6 +31,7 @@ import org.apache.commons.pool2.impl.DefaultPooledObject; import org.apache.hadoop.hdds.server.YamlUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ozone.compaction.log.SstFileInfo; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.LiveFileMetaData; @@ -66,6 +67,7 @@ public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { public static final Tag SNAPSHOT_YAML_TAG = new Tag("OmSnapshotLocalData"); public static final Tag SNAPSHOT_VERSION_META_TAG = new Tag("VersionMeta"); public static final Tag SST_FILE_INFO_TAG = new Tag("SstFileInfo"); + public static final String YAML_FILE_EXTENSION = ".yaml"; /** * Creates a new OmSnapshotLocalDataYaml with default values. @@ -88,7 +90,7 @@ public OmSnapshotLocalDataYaml(OmSnapshotLocalData source) { * @return true if the checksum is valid, false otherwise * @throws IOException if there's an error computing the checksum */ - public static boolean verifyChecksum(OmSnapshotManager snapshotManager, OmSnapshotLocalData snapshotData) + public static boolean verifyChecksum(OmSnapshotLocalDataManager localDataManager, OmSnapshotLocalData snapshotData) throws IOException { Preconditions.checkNotNull(snapshotData, "snapshotData cannot be null"); @@ -106,7 +108,7 @@ public static boolean verifyChecksum(OmSnapshotManager snapshotManager, OmSnapsh snapshotDataCopy.setChecksum(null); // Get the YAML representation - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + try (UncheckedAutoCloseableSupplier yaml = localDataManager.getSnapshotLocalYaml()) { // Compute new checksum snapshotDataCopy.computeAndSetChecksum(yaml.get()); @@ -272,8 +274,8 @@ public Object construct(Node node) { * (without triggering checksum computation or persistence). * @return YAML string representation */ - public String getYaml(OmSnapshotManager snapshotManager) throws IOException { - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + public String getYaml(OmSnapshotLocalDataManager snapshotLocalDataManager) throws IOException { + try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { return yaml.get().dump(this); } } @@ -283,9 +285,9 @@ public String getYaml(OmSnapshotManager snapshotManager) throws IOException { * @param yamlFile The file to write to * @throws IOException If there's an error writing to the file */ - public void writeToYaml(OmSnapshotManager snapshotManager, File yamlFile) throws IOException { + public void writeToYaml(OmSnapshotLocalDataManager snapshotLocalDataManager, File yamlFile) throws IOException { // Create Yaml - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { // Compute Checksum and update SnapshotData computeAndSetChecksum(yaml.get()); // Write the SnapshotData with checksum to Yaml file. 
@@ -299,11 +301,11 @@ public void writeToYaml(OmSnapshotManager snapshotManager, File yamlFile) throws * @return A new OmSnapshotLocalDataYaml instance * @throws IOException If there's an error reading the file */ - public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotManager snapshotManager, File yamlFile) - throws IOException { + public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotLocalDataManager snapshotLocalDataManager, + File yamlFile) throws IOException { Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { - return getFromYamlStream(snapshotManager, inputFileStream); + return getFromYamlStream(snapshotLocalDataManager, inputFileStream); } } @@ -311,10 +313,10 @@ public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotManager snapshot * Read the YAML content InputStream, and return OmSnapshotLocalDataYaml instance. * @throws IOException */ - public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotManager snapshotManager, + public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotLocalDataManager snapshotLocalDataManager, InputStream input) throws IOException { OmSnapshotLocalDataYaml dataYaml; - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { dataYaml = yaml.get().load(input); } catch (YAMLException ex) { // Unchecked exception. Convert to IOException diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index d531f95c46b6..19fe367bb923 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -82,7 +82,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.ratis.RatisHelper; @@ -102,6 +101,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDiffCleanupService; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; @@ -117,7 +117,6 @@ import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; /** * This class is used to manage/create OM snapshots. 
@@ -186,7 +185,7 @@ public final class OmSnapshotManager implements AutoCloseable { private final List columnFamilyDescriptors; private final List columnFamilyHandles; private final SnapshotDiffCleanupService snapshotDiffCleanupService; - private final GenericObjectPool yamlPool; + private final OmSnapshotLocalDataManager snapshotLocalDataManager; private final int maxPageSize; @@ -197,7 +196,7 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) { - this.yamlPool = new GenericObjectPool<>(new OmSnapshotLocalDataYaml.YamlFactory()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", @@ -517,11 +516,12 @@ public static DBCheckpoint createOmSnapshotCheckpoint( } OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); + OmSnapshotLocalDataManager snapshotLocalDataManager = omSnapshotManager.getSnapshotLocalDataManager(); OzoneConfiguration configuration = ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getConfiguration(); try (OmMetadataManagerImpl checkpointMetadataManager = OmMetadataManagerImpl.createCheckpointMetadataManager(configuration, dbCheckpoint)) { // Create the snapshot local property file. - OmSnapshotManager.createNewOmSnapshotLocalDataFile(omSnapshotManager, + snapshotLocalDataManager.createNewOmSnapshotLocalDataFile( (RDBStore) checkpointMetadataManager.getStore(), snapshotInfo); } @@ -628,28 +628,12 @@ private static void deleteKeysFromDelKeyTableInSnapshotScope( * @param store AOS or snapshot DB for not defragged or defragged snapshot respectively. * @return a Map of (table, set of SST files corresponding to the table) */ - private static List getSnapshotSSTFileList(RDBStore store) - throws IOException { + public static List getSnapshotSSTFileList(RDBStore store) throws IOException { return store.getDb().getLiveFilesMetaData().stream() .filter(lfm -> COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT.contains(StringUtils.bytes2String(lfm.columnFamilyName()))) .collect(Collectors.toList()); } - /** - * Creates and writes snapshot local properties to a YAML file with not defragged SST file list. - * @param snapshotManager snapshot manager instance. - * @param snapshotStore snapshot metadata manager. - * @param snapshotInfo snapshot info instance corresponding to snapshot. 
- */ - public static void createNewOmSnapshotLocalDataFile(OmSnapshotManager snapshotManager, RDBStore snapshotStore, - SnapshotInfo snapshotInfo) throws IOException { - Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); - Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), - getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataYaml.writeToYaml(snapshotManager, snapshotLocalDataPath.toFile()); - } - // Get OmSnapshot if the keyName has ".snapshot" key indicator @SuppressWarnings("unchecked") public UncheckedAutoCloseableSupplier getActiveFsMetadataOrSnapshot( @@ -691,24 +675,8 @@ public UncheckedAutoCloseableSupplier getSnapshot( return getSnapshot(volumeName, bucketName, snapshotName, true); } - public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { - try { - Yaml yaml = yamlPool.borrowObject(); - return new UncheckedAutoCloseableSupplier() { - - @Override - public void close() { - yamlPool.returnObject(yaml); - } - - @Override - public Yaml get() { - return yaml; - } - }; - } catch (Exception e) { - throw new IOException("Failed to get snapshot local yaml", e); - } + public OmSnapshotLocalDataManager getSnapshotLocalDataManager() { + return snapshotLocalDataManager; } private UncheckedAutoCloseableSupplier getSnapshot( @@ -856,29 +824,6 @@ public static String extractSnapshotIDFromCheckpointDirName(String snapshotPath) return snapshotPath.substring(index + OM_DB_NAME.length() + OM_SNAPSHOT_SEPARATOR.length()); } - /** - * Returns the path to the YAML file that stores local properties for the given snapshot. - * - * @param omMetadataManager metadata manager to get the base path - * @param snapshotInfo snapshot metadata - * @return the path to the snapshot's local property YAML file - */ - public static String getSnapshotLocalPropertyYamlPath(OMMetadataManager omMetadataManager, - SnapshotInfo snapshotInfo) { - Path snapshotPath = getSnapshotPath(omMetadataManager, snapshotInfo); - return getSnapshotLocalPropertyYamlPath(snapshotPath); - } - - /** - * Returns the path to the YAML file that stores local properties for the given snapshot. 
- * - * @param snapshotPath path to the snapshot checkpoint dir - * @return the path to the snapshot's local property YAML file - */ - public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) { - return snapshotPath.toString() + ".yaml"; - } - public static boolean isSnapshotKey(String[] keyParts) { return (keyParts.length > 1) && (keyParts[0].compareTo(OM_SNAPSHOT_INDICATOR) == 0); @@ -1199,8 +1144,8 @@ public void close() { if (options != null) { options.close(); } - if (yamlPool != null) { - yamlPool.close(); + if (snapshotLocalDataManager != null) { + snapshotLocalDataManager.close(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index ef3555f54350..75ba2a8f9501 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +99,9 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); // Delete Snapshot checkpoint directory. - deleteCheckpointDirectory(omMetadataManager, snapshotInfo); + OmSnapshotLocalDataManager snapshotLocalDataManager = ((OmMetadataManagerImpl) omMetadataManager) + .getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager(); + deleteCheckpointDirectory(snapshotLocalDataManager, omMetadataManager, snapshotInfo); // Delete snapshotInfo from the table. omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } @@ -117,8 +120,8 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager, /** * Deletes the checkpoint directory for a snapshot. */ - private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, - SnapshotInfo snapshotInfo) { + private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager, + OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { // Acquiring write lock to avoid race condition with sst filtering service which creates a sst filtered file // inside the snapshot directory. Any operation apart which doesn't create/delete files under this snapshot // directory can run in parallel along with this operation. @@ -127,8 +130,8 @@ private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); - Path snapshotLocalDataPath = Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(omMetadataManager, snapshotInfo)); + // TODO: Do not delete on snapshot purge. OmSnapshotLocalDataManager should delete orphan local data files. 
+ Path snapshotLocalDataPath = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); try { FileUtils.deleteDirectory(snapshotDirPath.toFile()); Files.deleteIfExists(snapshotLocalDataPath); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java new file mode 100644 index 000000000000..fb6d7cf744a9 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; + +import com.google.common.graph.GraphBuilder; +import com.google.common.graph.MutableGraph; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.UUID; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.yaml.snakeyaml.Yaml; + +/** + * Manages local data and metadata associated with Ozone Manager (OM) snapshots, + * including the creation, storage, and representation of data as YAML files. + */ +public class OmSnapshotLocalDataManager implements AutoCloseable { + + private final GenericObjectPool<Yaml> yamlPool; + private final MutableGraph<VersionLocalDataNode> localDataGraph; + private final OMMetadataManager omMetadataManager; + + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) { + this.yamlPool = new GenericObjectPool<>(new OmSnapshotLocalDataYaml.YamlFactory()); + this.localDataGraph = GraphBuilder.directed().build(); + this.omMetadataManager = omMetadataManager; + init(); + } + + /** + * Returns the path to the YAML file that stores local properties for the given snapshot. + * + * @param snapshotPath path to the snapshot checkpoint dir + * @return the path to the snapshot's local property YAML file + */ + public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) { + return snapshotPath.toString() + YAML_FILE_EXTENSION; + } + + /** + * Returns the path to the YAML file that stores local properties for the given snapshot.
+ * + * @param snapshotInfo snapshot metadata + * @return the path to the snapshot's local property YAML file + */ + public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) { + Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); + return getSnapshotLocalPropertyYamlPath(snapshotPath); + } + + /** + * Creates and writes snapshot local properties to a YAML file with not defragged SST file list. + * @param snapshotStore snapshot metadata manager. + * @param snapshotInfo snapshot info instance corresponding to snapshot. + */ + public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInfo snapshotInfo) throws IOException { + Path snapshotLocalDataPath = Paths.get( + getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); + Files.deleteIfExists(snapshotLocalDataPath); + OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); + snapshotLocalDataYaml.writeToYaml(this, snapshotLocalDataPath.toFile()); + } + + private void init() { + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + File snapshotDir = new File(store.getSnapshotsParentDir()); + for (File yamlFile : + Objects.requireNonNull(snapshotDir.listFiles( + (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)))) { + System.out.println(yamlFile.getAbsolutePath()); + } + } + + @Override + public void close() { + if (yamlPool != null) { + yamlPool.close(); + } + } + + private final class VersionLocalDataNode { + private UUID snapshotId; + private int version; + private UUID previousSnapshotId; + private int previousSnapshotVersion; + + private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { + this.previousSnapshotId = previousSnapshotId; + this.previousSnapshotVersion = previousSnapshotVersion; + this.snapshotId = snapshotId; + this.version = version; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof VersionLocalDataNode)) { + return false; + } + + VersionLocalDataNode that = (VersionLocalDataNode) o; + return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && + snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); + } + + @Override + public int hashCode() { + return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); + } + } + + public UncheckedAutoCloseableSupplier<Yaml> getSnapshotLocalYaml() throws IOException { + try { + Yaml yaml = yamlPool.borrowObject(); + return new UncheckedAutoCloseableSupplier<Yaml>() { + + @Override + public void close() { + yamlPool.returnObject(yaml); + } + + @Override + public Yaml get() { + return yaml; + } + }; + } catch (Exception e) { + throw new IOException("Failed to get snapshot local yaml", e); + } + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 8b41e5072185..71933f8112c4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -44,6 +44,7 @@
import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ozone.compaction.log.SstFileInfo; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; @@ -59,7 +60,7 @@ public class TestOmSnapshotLocalDataYaml { private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - private static OmSnapshotManager omSnapshotManager; + private static OmSnapshotLocalDataManager snapshotLocalDataManager; private static final Yaml YAML = new OmSnapshotLocalDataYaml.YamlFactory().create(); private static final UncheckedAutoCloseableSupplier YAML_SUPPLIER = new UncheckedAutoCloseableSupplier() { @Override @@ -77,8 +78,8 @@ public void close() { @BeforeAll public static void setupClassMocks() throws IOException { - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); + snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); + when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); } @BeforeEach @@ -137,7 +138,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw File yamlFile = new File(testRoot, yamlFilePath); // Create YAML file with SnapshotData - dataYaml.writeToYaml(omSnapshotManager, yamlFile); + dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); // Check YAML file exists assertTrue(yamlFile.exists()); @@ -153,7 +154,7 @@ public void testWriteToYaml() throws IOException { UUID prevSnapId = yamlFilePrevIdPair.getRight(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Verify fields assertEquals(44, snapshotData.getVersion()); @@ -194,7 +195,7 @@ public void testUpdateSnapshotDataFile() throws IOException { File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalDataYaml dataYaml = - OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Update snapshot data dataYaml.setSstFiltered(false); @@ -203,10 +204,10 @@ public void testUpdateSnapshotDataFile() throws IOException { singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); // Write updated data back to file - dataYaml.writeToYaml(omSnapshotManager, yamlFile); + dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); // Read back the updated data - dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); @@ -225,7 +226,7 @@ public void testEmptyFile() throws IOException { assertTrue(emptyFile.createNewFile()); IOException ex = assertThrows(IOException.class, () -> - OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, emptyFile)); + OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, emptyFile)); assertThat(ex).hasMessageContaining("Failed to load snapshot file. 
File is empty."); } @@ -236,7 +237,7 @@ public void testChecksum() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Get the original checksum String originalChecksum = snapshotData.getChecksum(); @@ -244,7 +245,7 @@ public void testChecksum() throws IOException { // Verify the checksum is not null or empty assertThat(originalChecksum).isNotNull().isNotEmpty(); - assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(omSnapshotManager, snapshotData)); + assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(snapshotLocalDataManager, snapshotData)); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index df2b026bce40..62f9561d2b83 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -82,6 +82,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.util.Time; import org.apache.ozone.compaction.log.SstFileInfo; @@ -107,6 +108,7 @@ class TestOmSnapshotManager { private SnapshotChainManager snapshotChainManager; private OmMetadataManagerImpl omMetadataManager; private OmSnapshotManager omSnapshotManager; + private OmSnapshotLocalDataManager snapshotLocalDataManager; private static final String CANDIDATE_DIR_NAME = OM_DB_NAME + SNAPSHOT_CANDIDATE_DIR; private File leaderDir; @@ -139,6 +141,7 @@ void init(@TempDir File tempDir) throws Exception { om = omTestManagers.getOzoneManager(); omMetadataManager = (OmMetadataManagerImpl) om.getMetadataManager(); omSnapshotManager = om.getOmSnapshotManager(); + snapshotLocalDataManager = om.getOmSnapshotManager().getSnapshotLocalDataManager(); snapshotChainManager = omMetadataManager.getSnapshotChainManager(); } @@ -158,8 +161,8 @@ void cleanup() throws IOException { SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotInfoKey); snapshotChainManager.deleteSnapshot(snapshotInfo); snapshotInfoTable.delete(snapshotInfoKey); - Path snapshotYaml = Paths.get(OmSnapshotManager.getSnapshotLocalPropertyYamlPath( - om.getMetadataManager(), snapshotInfo)); + + Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); Files.deleteIfExists(snapshotYaml); } omSnapshotManager.invalidateCache(); @@ -310,19 +313,19 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { when(mockedStore.getDb()).thenReturn(mockedDb); when(mockedDb.getLiveFilesMetaData()).thenReturn(mockedLiveFiles); - Path snapshotYaml = Paths.get(OmSnapshotManager.getSnapshotLocalPropertyYamlPath( - omMetadataManager, snapshotInfo)); + Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, 
snapshotInfo).toFile()); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); assertEquals(0, Files.size(snapshotYaml)); // Create a new YAML file for the snapshot - OmSnapshotManager.createNewOmSnapshotLocalDataFile(omSnapshotManager, mockedStore, snapshotInfo); + snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(mockedStore, snapshotInfo); // Verify that previous file was overwritten assertTrue(Files.exists(snapshotYaml)); assertTrue(Files.size(snapshotYaml) > 0); // Verify the contents of the YAML file - OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, snapshotYaml.toFile()); + OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, + snapshotYaml.toFile()); assertNotNull(localData); assertEquals(0, localData.getVersion()); assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index d2ceb5a44786..0fb26a4cd993 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -47,11 +47,11 @@ import org.apache.hadoop.hdds.utils.db.CodecException; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -164,7 +164,7 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertTrue(Files.exists(checkpoint)); assertTrue(Files.exists(Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); + OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); } OMRequest snapshotPurgeRequest = createPurgeKeysRequest( @@ -191,7 +191,7 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); assertFalse(Files.exists(Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); + OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); } assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges()); assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 
1e78943c7b5e..ce24040a3eab 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -91,9 +92,11 @@ public void close() { }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); + OmSnapshotLocalDataManager snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(omSnapshotManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(snapshotLocalDataManager); + when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index 24fdc138fd72..f8d40951b2bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -83,9 +84,11 @@ public void close() { }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); + OmSnapshotLocalDataManager omSnapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(omSnapshotManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(omSnapshotLocalDataManager); + when(omSnapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } From 252d338d168e4eefa13b2fe48d5e14b5fb81b125 Mon Sep 17 
00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:16:37 -0400 Subject: [PATCH 003/126] HDDS-13627. In memory Manager for Snapshot Local Data Change-Id: I34536ff06efb7d5a4942853f0fd83942ab398b5f --- hadoop-hdds/common/pom.xml | 4 + .../apache/hadoop/ozone/util/Checksum.java | 24 +++ .../hadoop/ozone/util/ObjectSerializer.java | 65 +++++++++ .../hadoop/ozone/util/YamlSerializer.java | 138 ++++++++++++++++++ .../hadoop/ozone/om/OmSnapshotLocalData.java | 8 +- .../ozone/om/OmSnapshotLocalDataYaml.java | 128 +--------------- .../hadoop/ozone/om/OmSnapshotManager.java | 2 +- .../snapshot/OmSnapshotLocalDataManager.java | 78 +++++----- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 59 ++++---- .../ozone/om/TestOmSnapshotManager.java | 3 +- .../TestOMSnapshotCreateResponse.java | 17 --- .../TestOMSnapshotDeleteResponse.java | 17 --- 12 files changed, 319 insertions(+), 224 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 0216b808a7cf..f22aeda491ac 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -108,6 +108,10 @@ <groupId>org.apache.commons</groupId> <artifactId>commons-lang3</artifactId> </dependency> + <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-pool2</artifactId> + </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java new file mode 100644 index 000000000000..03e0559a6d3d --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import org.apache.hadoop.hdds.utils.db.CopyObject; + +public interface Checksum<T extends Checksum<T>> extends CopyObject<T> { + String getChecksum(); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java new file mode 100644 index 000000000000..b9727d559148 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; + +public interface ObjectSerializer<T> extends Closeable { + + /** + * Loads an object of type T from the specified file. + * + * @param path the file from which the object will be loaded + * @return the object of type T that has been deserialized from the file + * @throws IOException if an I/O error occurs during reading from the file + */ + T load(File path) throws IOException; + + /** + * Loads an object of type T from the specified input stream. + * + * @param inputStream the input stream from which the object will be deserialized + * @return the deserialized object of type T + * @throws IOException if an I/O error occurs during reading from the input stream + */ + T load(InputStream inputStream) throws IOException; + + /** + * Serializes the given data object of type T and saves it to the specified file. + * + * @param path the file where the serialized object will be saved + * @param data the object of type T to be serialized and saved + * @throws IOException if an I/O error occurs during writing to the file + */ + void save(File path, T data) throws IOException; + + /** + * Verifies the checksum of the provided data object of type T. + * + * @param data the object of type T whose checksum is to be verified + * @return true if the checksum of the data is valid, false otherwise + * @throws IOException if an I/O error occurs during verification + */ + boolean verifyChecksum(T data) throws IOException; + + @Override + void close() throws IOException; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java new file mode 100644 index 000000000000..07a128044b00 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
*/ +package org.apache.hadoop.ozone.util; + +import com.google.common.base.Preconditions; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import org.apache.commons.pool2.BasePooledObjectFactory; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.hadoop.hdds.server.YamlUtils; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +public abstract class YamlSerializer<T extends Checksum<T>> implements ObjectSerializer<T> { + + private static final Logger LOG = LoggerFactory.getLogger(YamlSerializer.class); + + private final GenericObjectPool<Yaml> yamlPool; + + public YamlSerializer(BasePooledObjectFactory<Yaml> yamlFactory) { + this.yamlPool = new GenericObjectPool<>(yamlFactory); + } + + private UncheckedAutoCloseableSupplier<Yaml> getYaml() throws IOException { + try { + Yaml yaml = yamlPool.borrowObject(); + return new UncheckedAutoCloseableSupplier<Yaml>() { + + @Override + public void close() { + yamlPool.returnObject(yaml); + } + + @Override + public Yaml get() { + return yaml; + } + }; + } catch (Exception e) { + throw new IOException("Failed to get yaml object.", e); + } + } + + @Override + public T load(File yamlFile) throws IOException { + Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); + try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { + return load(inputFileStream); + } + } + + @Override + public T load(InputStream input) throws IOException { + T dataYaml; + try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) { + dataYaml = yaml.get().load(input); + } catch (Exception e) { + throw new IOException("Failed to load file", e); + } + + if (dataYaml == null) { + // If Yaml#load returned null, then the file is empty. This is valid yaml + // but considered an error in this case since we have lost data about + // the snapshot. + throw new IOException("Failed to load file. File is empty."); + } + + return dataYaml; + } + + @Override + public boolean verifyChecksum(T data) throws IOException { + Preconditions.checkNotNull(data, "data cannot be null"); + + // Get the stored checksum + String storedChecksum = data.getChecksum(); + if (storedChecksum == null) { + LOG.warn("No checksum found in snapshot data for verification"); + return false; + } + + // Create a copy of the snapshot data for computing checksum + T copy = data.copyObject(); + + // Get the YAML representation + try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) { + // Compute new checksum + computeAndSetChecksum(yaml.get(), copy); + + // Compare the stored and computed checksums + String computedChecksum = copy.getChecksum(); + boolean isValid = storedChecksum.equals(computedChecksum); + + if (!isValid) { + LOG.warn("Checksum verification failed for snapshot local data. " + + "Stored: {}, Computed: {}", storedChecksum, computedChecksum); + } + return isValid; + } + } + + @Override + public void save(File yamlFile, T data) throws IOException { + // Create Yaml + try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) { + // Compute Checksum and update SnapshotData + computeAndSetChecksum(yaml.get(), data); + // Write the object with checksum to Yaml file.
+ YamlUtils.dump(yaml.get(), data, yamlFile, LOG); + } + } + + @Override + public void close() throws IOException { + + } + + public abstract void computeAndSetChecksum(Yaml yaml, T data) throws IOException; + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5f65fd4c0d08..e82bad8832a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -31,6 +31,7 @@ import java.util.stream.Collectors; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.utils.db.CopyObject; +import org.apache.hadoop.ozone.util.Checksum; import org.apache.ozone.compaction.log.SstFileInfo; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -39,7 +40,7 @@ * OmSnapshotLocalData is the in-memory representation of snapshot local metadata. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerData */ -public abstract class OmSnapshotLocalData { +public class OmSnapshotLocalData implements Checksum { // Unique identifier for the snapshot. This is used to identify the snapshot. private UUID snapshotId; @@ -258,6 +259,11 @@ public void setVersion(int version) { this.version = version; } + @Override + public OmSnapshotLocalData copyObject() { + return new OmSnapshotLocalData(this); + } + /** * Represents metadata for a specific version in a snapshot. * This class maintains the version of the previous snapshot and a list of SST (Sorted String Table) files diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index a3683e11c16f..94632d7385d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -31,6 +31,7 @@ import org.apache.commons.pool2.impl.DefaultPooledObject; import org.apache.hadoop.hdds.server.YamlUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ozone.compaction.log.SstFileInfo; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; @@ -60,7 +61,7 @@ * Checksum of the YAML fields are computed and stored in the YAML file transparently to callers. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml */ -public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { +public final class OmSnapshotLocalDataYaml { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataYaml.class); @@ -69,61 +70,6 @@ public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { public static final Tag SST_FILE_INFO_TAG = new Tag("SstFileInfo"); public static final String YAML_FILE_EXTENSION = ".yaml"; - /** - * Creates a new OmSnapshotLocalDataYaml with default values. - */ - public OmSnapshotLocalDataYaml(UUID snapshotId, List liveFileMetaDatas, UUID previousSnapshotId) { - super(snapshotId, liveFileMetaDatas, previousSnapshotId); - } - - /** - * Copy constructor to create a deep copy. 
- * @param source The source OmSnapshotLocalData to copy from - */ - public OmSnapshotLocalDataYaml(OmSnapshotLocalData source) { - super(source); - } - - /** - * Verifies the checksum of the snapshot data. - * @param snapshotData The snapshot data to verify - * @return true if the checksum is valid, false otherwise - * @throws IOException if there's an error computing the checksum - */ - public static boolean verifyChecksum(OmSnapshotLocalDataManager localDataManager, OmSnapshotLocalData snapshotData) - throws IOException { - Preconditions.checkNotNull(snapshotData, "snapshotData cannot be null"); - - // Get the stored checksum - String storedChecksum = snapshotData.getChecksum(); - if (storedChecksum == null) { - LOG.warn("No checksum found in snapshot data for verification"); - return false; - } - - // Create a copy of the snapshot data for computing checksum - OmSnapshotLocalDataYaml snapshotDataCopy = new OmSnapshotLocalDataYaml(snapshotData); - - // Clear the existing checksum in the copy - snapshotDataCopy.setChecksum(null); - - // Get the YAML representation - try (UncheckedAutoCloseableSupplier yaml = localDataManager.getSnapshotLocalYaml()) { - // Compute new checksum - snapshotDataCopy.computeAndSetChecksum(yaml.get()); - - // Compare the stored and computed checksums - String computedChecksum = snapshotDataCopy.getChecksum(); - boolean isValid = storedChecksum.equals(computedChecksum); - - if (!isValid) { - LOG.warn("Checksum verification failed for snapshot local data. " + - "Stored: {}, Computed: {}", storedChecksum, computedChecksum); - } - return isValid; - } - } - /** * Representer class to define which fields need to be stored in yaml file. */ @@ -131,7 +77,7 @@ private static class OmSnapshotLocalDataRepresenter extends Representer { OmSnapshotLocalDataRepresenter(DumperOptions options) { super(options); - this.addClassTag(OmSnapshotLocalDataYaml.class, SNAPSHOT_YAML_TAG); + this.addClassTag(OmSnapshotLocalData.class, SNAPSHOT_YAML_TAG); this.addClassTag(VersionMeta.class, SNAPSHOT_VERSION_META_TAG); this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG); representers.put(SstFileInfo.class, new RepresentSstFileInfo()); @@ -192,7 +138,7 @@ private static class SnapshotLocalDataConstructor extends SafeConstructor { this.yamlConstructors.put(SNAPSHOT_YAML_TAG, new ConstructSnapshotLocalData()); this.yamlConstructors.put(SNAPSHOT_VERSION_META_TAG, new ConstructVersionMeta()); this.yamlConstructors.put(SST_FILE_INFO_TAG, new ConstructSstFileInfo()); - TypeDescription omDesc = new TypeDescription(OmSnapshotLocalDataYaml.class); + TypeDescription omDesc = new TypeDescription(OmSnapshotLocalData.class); omDesc.putMapPropertyType(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO, Integer.class, VersionMeta.class); this.addTypeDescription(omDesc); TypeDescription versionMetaDesc = new TypeDescription(VersionMeta.class); @@ -231,7 +177,7 @@ public Object construct(Node node) { Map nodes = constructMapping(mnode); UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); - OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(snapId, Collections.emptyList(), + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); // Set version from YAML @@ -269,70 +215,6 @@ public Object construct(Node node) { } } - /** - * Returns the YAML representation of this object as a String - * (without triggering checksum computation 
or persistence). - * @return YAML string representation - */ - public String getYaml(OmSnapshotLocalDataManager snapshotLocalDataManager) throws IOException { - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - return yaml.get().dump(this); - } - } - - /** - * Computes checksum (stored in this object), and writes this object to a YAML file. - * @param yamlFile The file to write to - * @throws IOException If there's an error writing to the file - */ - public void writeToYaml(OmSnapshotLocalDataManager snapshotLocalDataManager, File yamlFile) throws IOException { - // Create Yaml - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - // Compute Checksum and update SnapshotData - computeAndSetChecksum(yaml.get()); - // Write the SnapshotData with checksum to Yaml file. - YamlUtils.dump(yaml.get(), this, yamlFile, LOG); - } - } - - /** - * Creates a OmSnapshotLocalDataYaml instance from a YAML file. - * @param yamlFile The YAML file to read from - * @return A new OmSnapshotLocalDataYaml instance - * @throws IOException If there's an error reading the file - */ - public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotLocalDataManager snapshotLocalDataManager, - File yamlFile) throws IOException { - Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); - try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { - return getFromYamlStream(snapshotLocalDataManager, inputFileStream); - } - } - - /** - * Read the YAML content InputStream, and return OmSnapshotLocalDataYaml instance. - * @throws IOException - */ - public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotLocalDataManager snapshotLocalDataManager, - InputStream input) throws IOException { - OmSnapshotLocalDataYaml dataYaml; - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - dataYaml = yaml.get().load(input); - } catch (YAMLException ex) { - // Unchecked exception. Convert to IOException - throw new IOException(ex); - } - - if (dataYaml == null) { - // If Yaml#load returned null, then the file is empty. This is valid yaml - // but considered an error in this case since we have lost data about - // the snapshot. - throw new IOException("Failed to load snapshot file. File is empty."); - } - - return dataYaml; - } - /** * Factory class for constructing and pooling instances of the Yaml object. 
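 * <p>Borrowed {@code Yaml} instances must be returned to the pool. A sketch of
 * the borrow/return pattern this factory is used with (illustrative, mirroring
 * {@code YamlSerializer#getYaml()}):</p>
 * <pre>{@code
 * GenericObjectPool<Yaml> pool = new GenericObjectPool<>(new YamlFactory());
 * Yaml yaml = pool.borrowObject();
 * try {
 *   // ... dump or load with yaml ...
 * } finally {
 *   pool.returnObject(yaml);
 * }
 * }</pre>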
* This class extends BasePooledObjectFactory to support object pooling, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 19fe367bb923..ac59c43c0580 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -195,7 +195,7 @@ public final class OmSnapshotManager implements AutoCloseable { private int fsSnapshotMaxLimit; private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); - public OmSnapshotManager(OzoneManager ozoneManager) { + public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index fb6d7cf744a9..bd69a5f2b8c2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import java.io.File; @@ -28,13 +29,16 @@ import java.nio.file.Paths; import java.util.Objects; import java.util.UUID; -import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; /** @@ -43,14 +47,23 @@ */ public class OmSnapshotLocalDataManager implements AutoCloseable { - private final GenericObjectPool yamlPool; + private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); + + private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; private final OMMetadataManager omMetadataManager; - public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) { - this.yamlPool = new GenericObjectPool(new OmSnapshotLocalDataYaml.YamlFactory()); + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; + this.snapshotLocalDataSerializer = new YamlSerializer( + new OmSnapshotLocalDataYaml.YamlFactory()) { + + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; init(); } @@ -84,30 
+97,47 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf Path snapshotLocalDataPath = Paths.get( getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + OmSnapshotLocalData snapshotLocalDataYaml = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataYaml.writeToYaml(this, snapshotLocalDataPath.toFile()); + snapshotLocalDataSerializer.save(snapshotLocalDataPath.toFile(), snapshotLocalDataYaml); + } + + public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { + Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotInfo)); + return snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); + } + + public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { + return snapshotLocalDataSerializer.load(snapshotDataPath); } - private void init() { + private void init() throws IOException { RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); - for (File yamlFile : - Objects.requireNonNull(snapshotDir.listFiles( - (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)))) { + File[] yamlFiles = snapshotDir.listFiles( + (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)); + if (yamlFiles == null) { + throw new IOException("Error while listing yaml files inside directory: " + snapshotDir.getAbsolutePath()); + } + for (File yamlFile : yamlFiles) { System.out.println(yamlFile.getAbsolutePath()); } } + @Override public void close() { - if (yamlPool != null) { - yamlPool.close(); + if (snapshotLocalDataSerializer != null) { + try { + snapshotLocalDataSerializer.close(); + } catch (IOException e) { + LOG.error("Failed to close snapshot local data serializer", e); + } } } - private final class VersionLocalDataNode { + private static final class VersionLocalDataNode { private UUID snapshotId; private int version; private UUID previousSnapshotId; @@ -137,24 +167,4 @@ public int hashCode() { } } - public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { - try { - Yaml yaml = yamlPool.borrowObject(); - return new UncheckedAutoCloseableSupplier() { - - @Override - public void close() { - yamlPool.returnObject(yaml); - } - - @Override - public Yaml get() { - return yaml; - } - }; - } catch (Exception e) { - throw new IOException("Failed to get snapshot local yaml", e); - } - } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 71933f8112c4..34435366781a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -45,8 +45,10 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import 
org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -60,26 +62,26 @@ public class TestOmSnapshotLocalDataYaml { private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - private static OmSnapshotLocalDataManager snapshotLocalDataManager; - private static final Yaml YAML = new OmSnapshotLocalDataYaml.YamlFactory().create(); - private static final UncheckedAutoCloseableSupplier YAML_SUPPLIER = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return YAML; - } - - @Override - public void close() { - - } - }; + private static final OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); + private static ObjectSerializer omSnapshotLocalDataSerializer; private static final Instant NOW = Instant.now(); @BeforeAll - public static void setupClassMocks() throws IOException { - snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); + public static void setupSerializer() throws IOException { + omSnapshotLocalDataSerializer = new YamlSerializer(yamlFactory) { + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; + } + + @AfterAll + public static void cleanupSerializer() throws IOException { + if (omSnapshotLocalDataSerializer != null) { + omSnapshotLocalDataSerializer.close(); + } } @BeforeEach @@ -113,7 +115,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw createLiveFileMetaData("sst1", "table1", "k1", "k2"), createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); - OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(snapshotId, notDefraggedSSTFileList, + OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, previousSnapshotId); // Set version @@ -138,7 +140,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw File yamlFile = new File(testRoot, yamlFilePath); // Create YAML file with SnapshotData - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Check YAML file exists assertTrue(yamlFile.exists()); @@ -154,7 +156,7 @@ public void testWriteToYaml() throws IOException { UUID prevSnapId = yamlFilePrevIdPair.getRight(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Verify fields assertEquals(44, snapshotData.getVersion()); @@ -194,8 +196,8 @@ public void testUpdateSnapshotDataFile() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml dataYaml = - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData dataYaml = + omSnapshotLocalDataSerializer.load(yamlFile); // Update snapshot data dataYaml.setSstFiltered(false); @@ -204,10 +206,10 @@ public 
void testUpdateSnapshotDataFile() throws IOException { singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); // Write updated data back to file - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Read back the updated data - dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + dataYaml = omSnapshotLocalDataSerializer.load(yamlFile); // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); @@ -225,10 +227,9 @@ public void testEmptyFile() throws IOException { File emptyFile = new File(testRoot, "empty.yaml"); assertTrue(emptyFile.createNewFile()); - IOException ex = assertThrows(IOException.class, () -> - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, emptyFile)); + IOException ex = assertThrows(IOException.class, () -> omSnapshotLocalDataSerializer.load(emptyFile)); - assertThat(ex).hasMessageContaining("Failed to load snapshot file. File is empty."); + assertThat(ex).hasMessageContaining("Failed to load file. File is empty."); } @Test @@ -237,7 +238,7 @@ public void testChecksum() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Get the original checksum String originalChecksum = snapshotData.getChecksum(); @@ -245,7 +246,7 @@ public void testChecksum() throws IOException { // Verify the checksum is not null or empty assertThat(originalChecksum).isNotNull().isNotEmpty(); - assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(snapshotLocalDataManager, snapshotData)); + assertTrue(omSnapshotLocalDataSerializer.verifyChecksum(snapshotData)); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 62f9561d2b83..7f808df3f978 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -324,8 +324,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertTrue(Files.exists(snapshotYaml)); assertTrue(Files.size(snapshotYaml) > 0); // Verify the contents of the YAML file - OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, - snapshotYaml.toFile()); + OmSnapshotLocalData localData = snapshotLocalDataManager.getOmSnapshotLocalData(snapshotYaml.toFile()); assertNotNull(localData); assertEquals(0, localData.getVersion()); assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index ce24040a3eab..2cafae138fd4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -40,7 +40,6 @@ import 
org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -51,13 +50,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotCreateResponse. @@ -77,26 +74,12 @@ public void setup() throws Exception { String fsPath = folder.getAbsolutePath(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(snapshotLocalDataManager); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index f8d40951b2bf..2d5d7b2870f7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -44,12 +43,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import 
org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotDeleteResponse. @@ -69,26 +66,12 @@ public void setup() throws Exception { String fsPath = folder.toAbsolutePath().toString(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager omSnapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(omSnapshotLocalDataManager); - when(omSnapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } From 4099bc687ae22d6d4d7be6680d543abd01f1fd61 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:16:37 -0400 Subject: [PATCH 004/126] HDDS-13767. Refactor SnapshotLocalDataYaml related code into OmSnapshotLocalDataManager Change-Id: I34536ff06efb7d5a4942853f0fd83942ab398b5f --- hadoop-hdds/common/pom.xml | 4 + .../apache/hadoop/ozone/util/Checksum.java | 28 +++ .../hadoop/ozone/util/ObjectSerializer.java | 73 ++++++++ .../hadoop/ozone/util/YamlSerializer.java | 159 ++++++++++++++++++ .../hadoop/ozone/om/OmSnapshotLocalData.java | 8 +- .../ozone/om/OmSnapshotLocalDataYaml.java | 141 +--------------- .../snapshot/OmSnapshotLocalDataManager.java | 104 ++++-------- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 60 +++---- .../ozone/om/TestOmSnapshotManager.java | 3 +- .../TestOMSnapshotCreateResponse.java | 17 -- .../TestOMSnapshotDeleteResponse.java | 17 -- 11 files changed, 338 insertions(+), 276 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 0216b808a7cf..f22aeda491ac 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -108,6 +108,10 @@ org.apache.commons commons-lang3 + + org.apache.commons + commons-pool2 + org.apache.hadoop hadoop-common diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java new file mode 100644 index 000000000000..4d11bde5aef3 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.util;
+
+import org.apache.hadoop.hdds.utils.db.CopyObject;
+
+/**
+ * Represents a generic interface for objects capable of generating or providing
+ * a checksum value.
+ */
+public interface Checksum<T extends CopyObject<T>> extends CopyObject<T> {
+  String getChecksum();
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java
new file mode 100644
index 000000000000..b861ad93fdfb
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.util;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Represents a generic interface for serialization and deserialization
+ * operations of objects that extend the {@link Checksum} interface.
+ * This interface provides functionality for loading and saving objects
+ * from/to files or input streams, as well as verifying checksum integrity.
+ *
+ * @param <T> the type of the object handled by the serializer, must extend {@code Checksum}
+ */
+public interface ObjectSerializer<T extends Checksum<T>> extends Closeable {
+
+  /**
+   * Loads an object of type T from the specified file.
+   *
+   * @param path the file from which the object will be loaded
+   * @return the object of type T that has been deserialized from the file
+   * @throws IOException if an I/O error occurs during reading from the file
+   */
+  T load(File path) throws IOException;
+
+  /**
+   * Loads an object of type T from the specified input stream.
+   *
+   * @param inputStream the input stream from which the object will be deserialized
+   * @return the deserialized object of type T
+   * @throws IOException if an I/O error occurs during reading from the input stream
+   */
+  T load(InputStream inputStream) throws IOException;
+
+  /**
+   * Serializes the given data object of type T and saves it to the specified file.
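+   * <p>Usage sketch ({@code serializer} is any concrete implementation; the
+   * data type and file shown are illustrative, not part of this interface):</p>
+   * <pre>{@code
+   * serializer.save(file, data);   // persists data along with its checksum
+   * }</pre>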
+ * + * @param path the file where the serialized object will be saved + * @param data the object of type T to be serialized and saved + * @throws IOException if an I/O error occurs during writing to the file + */ + void save(File path, T data) throws IOException; + + /** + * Verifies the checksum of the provided data object of type T. + * + * @param data the object of type T whose checksum is to be verified + * @return true if the checksum of the data is valid, false otherwise + * @throws IOException if an I/O error occurs during verification + */ + boolean verifyChecksum(T data) throws IOException; + + @Override + void close() throws IOException; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java new file mode 100644 index 000000000000..32aeb928f172 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import com.google.common.base.Preconditions; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import org.apache.commons.pool2.BasePooledObjectFactory; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.hadoop.hdds.server.YamlUtils; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +/** + * An abstract serializer for objects that extend the {@link Checksum} interface. + * This class provides mechanisms for serializing and deserializing objects + * in a YAML format. 
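+ * <p>A typical wiring, shown as a sketch (the {@code OmSnapshotLocalData}-based
+ * names are illustrative callers from this patch series, not part of this class):</p>
+ * <pre>{@code
+ * ObjectSerializer<OmSnapshotLocalData> serializer =
+ *     new YamlSerializer<OmSnapshotLocalData>(new OmSnapshotLocalDataYaml.YamlFactory()) {
+ *       @Override
+ *       public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException {
+ *         data.computeAndSetChecksum(yaml);
+ *       }
+ *     };
+ * serializer.save(yamlFile, localData);   // embeds a freshly computed checksum
+ * OmSnapshotLocalData loaded = serializer.load(yamlFile);
+ * boolean ok = serializer.verifyChecksum(loaded);
+ * }</pre>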
+ */
+public abstract class YamlSerializer<T extends Checksum<T>> implements ObjectSerializer<T> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(YamlSerializer.class);
+
+  private final GenericObjectPool<Yaml> yamlPool;
+
+  public YamlSerializer(BasePooledObjectFactory<Yaml> yamlFactory) {
+    this.yamlPool = new GenericObjectPool<>(yamlFactory);
+  }
+
+  private UncheckedAutoCloseableSupplier<Yaml> getYaml() throws IOException {
+    try {
+      Yaml yaml = yamlPool.borrowObject();
+      return new UncheckedAutoCloseableSupplier<Yaml>() {
+
+        @Override
+        public void close() {
+          yamlPool.returnObject(yaml);
+        }
+
+        @Override
+        public Yaml get() {
+          return yaml;
+        }
+      };
+    } catch (Exception e) {
+      throw new IOException("Failed to get yaml object.", e);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public T load(File yamlFile) throws IOException {
+    Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null");
+    try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) {
+      return load(inputFileStream);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public T load(InputStream input) throws IOException {
+    T dataYaml;
+    try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) {
+      dataYaml = yaml.get().load(input);
+    } catch (Exception e) {
+      throw new IOException("Failed to load file", e);
+    }
+
+    if (dataYaml == null) {
+      // If Yaml#load returned null, then the file is empty. This is valid yaml
+      // but considered an error in this case since we have lost data about
+      // the snapshot.
+      throw new IOException("Failed to load file. File is empty.");
+    }
+
+    return dataYaml;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean verifyChecksum(T data) throws IOException {
+    Preconditions.checkNotNull(data, "data cannot be null");
+
+    // Get the stored checksum
+    String storedChecksum = data.getChecksum();
+    if (storedChecksum == null) {
+      LOG.warn("No checksum found in snapshot data for verification");
+      return false;
+    }
+
+    // Create a copy of the snapshot data for computing checksum
+    T copy = data.copyObject();
+
+    // Get the YAML representation
+    try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) {
+      // Compute new checksum
+      computeAndSetChecksum(yaml.get(), copy);
+
+      // Compare the stored and computed checksums
+      String computedChecksum = copy.getChecksum();
+      boolean isValid = storedChecksum.equals(computedChecksum);
+
+      if (!isValid) {
+        LOG.warn("Checksum verification failed for snapshot local data. " +
+            "Stored: {}, Computed: {}", storedChecksum, computedChecksum);
+      }
+      return isValid;
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void save(File yamlFile, T data) throws IOException {
+    // Create Yaml
+    try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) {
+      // Compute Checksum and update SnapshotData
+      computeAndSetChecksum(yaml.get(), data);
+      // Write the object with checksum to Yaml file.
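+      // The same borrowed Yaml instance is used for both the checksum pass and
+      // the dump; SnakeYAML's Yaml objects are not thread-safe, which is why
+      // instances are pooled rather than shared.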
+ YamlUtils.dump(yaml.get(), data, yamlFile, LOG); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void close() { + yamlPool.close(); + } + + public abstract void computeAndSetChecksum(Yaml yaml, T data) throws IOException; + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5f65fd4c0d08..e82bad8832a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -31,6 +31,7 @@ import java.util.stream.Collectors; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.utils.db.CopyObject; +import org.apache.hadoop.ozone.util.Checksum; import org.apache.ozone.compaction.log.SstFileInfo; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -39,7 +40,7 @@ * OmSnapshotLocalData is the in-memory representation of snapshot local metadata. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerData */ -public abstract class OmSnapshotLocalData { +public class OmSnapshotLocalData implements Checksum { // Unique identifier for the snapshot. This is used to identify the snapshot. private UUID snapshotId; @@ -258,6 +259,11 @@ public void setVersion(int version) { this.version = version; } + @Override + public OmSnapshotLocalData copyObject() { + return new OmSnapshotLocalData(this); + } + /** * Represents metadata for a specific version in a snapshot. * This class maintains the version of the previous snapshot and a list of SST (Sorted String Table) files diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index a3683e11c16f..543c4c6397cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -17,11 +17,6 @@ package org.apache.hadoop.ozone.om; -import com.google.common.base.Preconditions; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; import java.util.Collections; import java.util.List; import java.util.Map; @@ -29,21 +24,15 @@ import org.apache.commons.pool2.BasePooledObjectFactory; import org.apache.commons.pool2.PooledObject; import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.hadoop.hdds.server.YamlUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.ozone.compaction.log.SstFileInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; -import org.rocksdb.LiveFileMetaData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.DumperOptions; import org.yaml.snakeyaml.LoaderOptions; import org.yaml.snakeyaml.TypeDescription; import org.yaml.snakeyaml.Yaml; import org.yaml.snakeyaml.constructor.AbstractConstruct; import org.yaml.snakeyaml.constructor.SafeConstructor; -import org.yaml.snakeyaml.error.YAMLException; import org.yaml.snakeyaml.introspector.BeanAccess; import org.yaml.snakeyaml.introspector.Property; import 
org.yaml.snakeyaml.introspector.PropertyUtils; @@ -60,68 +49,14 @@ * Checksum of the YAML fields are computed and stored in the YAML file transparently to callers. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml */ -public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { - - private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataYaml.class); +public final class OmSnapshotLocalDataYaml { public static final Tag SNAPSHOT_YAML_TAG = new Tag("OmSnapshotLocalData"); public static final Tag SNAPSHOT_VERSION_META_TAG = new Tag("VersionMeta"); public static final Tag SST_FILE_INFO_TAG = new Tag("SstFileInfo"); public static final String YAML_FILE_EXTENSION = ".yaml"; - /** - * Creates a new OmSnapshotLocalDataYaml with default values. - */ - public OmSnapshotLocalDataYaml(UUID snapshotId, List liveFileMetaDatas, UUID previousSnapshotId) { - super(snapshotId, liveFileMetaDatas, previousSnapshotId); - } - - /** - * Copy constructor to create a deep copy. - * @param source The source OmSnapshotLocalData to copy from - */ - public OmSnapshotLocalDataYaml(OmSnapshotLocalData source) { - super(source); - } - - /** - * Verifies the checksum of the snapshot data. - * @param snapshotData The snapshot data to verify - * @return true if the checksum is valid, false otherwise - * @throws IOException if there's an error computing the checksum - */ - public static boolean verifyChecksum(OmSnapshotLocalDataManager localDataManager, OmSnapshotLocalData snapshotData) - throws IOException { - Preconditions.checkNotNull(snapshotData, "snapshotData cannot be null"); - - // Get the stored checksum - String storedChecksum = snapshotData.getChecksum(); - if (storedChecksum == null) { - LOG.warn("No checksum found in snapshot data for verification"); - return false; - } - - // Create a copy of the snapshot data for computing checksum - OmSnapshotLocalDataYaml snapshotDataCopy = new OmSnapshotLocalDataYaml(snapshotData); - - // Clear the existing checksum in the copy - snapshotDataCopy.setChecksum(null); - - // Get the YAML representation - try (UncheckedAutoCloseableSupplier yaml = localDataManager.getSnapshotLocalYaml()) { - // Compute new checksum - snapshotDataCopy.computeAndSetChecksum(yaml.get()); - - // Compare the stored and computed checksums - String computedChecksum = snapshotDataCopy.getChecksum(); - boolean isValid = storedChecksum.equals(computedChecksum); - - if (!isValid) { - LOG.warn("Checksum verification failed for snapshot local data. 
" + - "Stored: {}, Computed: {}", storedChecksum, computedChecksum); - } - return isValid; - } + private OmSnapshotLocalDataYaml() { } /** @@ -131,7 +66,7 @@ private static class OmSnapshotLocalDataRepresenter extends Representer { OmSnapshotLocalDataRepresenter(DumperOptions options) { super(options); - this.addClassTag(OmSnapshotLocalDataYaml.class, SNAPSHOT_YAML_TAG); + this.addClassTag(OmSnapshotLocalData.class, SNAPSHOT_YAML_TAG); this.addClassTag(VersionMeta.class, SNAPSHOT_VERSION_META_TAG); this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG); representers.put(SstFileInfo.class, new RepresentSstFileInfo()); @@ -192,7 +127,7 @@ private static class SnapshotLocalDataConstructor extends SafeConstructor { this.yamlConstructors.put(SNAPSHOT_YAML_TAG, new ConstructSnapshotLocalData()); this.yamlConstructors.put(SNAPSHOT_VERSION_META_TAG, new ConstructVersionMeta()); this.yamlConstructors.put(SST_FILE_INFO_TAG, new ConstructSstFileInfo()); - TypeDescription omDesc = new TypeDescription(OmSnapshotLocalDataYaml.class); + TypeDescription omDesc = new TypeDescription(OmSnapshotLocalData.class); omDesc.putMapPropertyType(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO, Integer.class, VersionMeta.class); this.addTypeDescription(omDesc); TypeDescription versionMetaDesc = new TypeDescription(VersionMeta.class); @@ -231,7 +166,7 @@ public Object construct(Node node) { Map nodes = constructMapping(mnode); UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); - OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(snapId, Collections.emptyList(), + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); // Set version from YAML @@ -269,70 +204,6 @@ public Object construct(Node node) { } } - /** - * Returns the YAML representation of this object as a String - * (without triggering checksum computation or persistence). - * @return YAML string representation - */ - public String getYaml(OmSnapshotLocalDataManager snapshotLocalDataManager) throws IOException { - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - return yaml.get().dump(this); - } - } - - /** - * Computes checksum (stored in this object), and writes this object to a YAML file. - * @param yamlFile The file to write to - * @throws IOException If there's an error writing to the file - */ - public void writeToYaml(OmSnapshotLocalDataManager snapshotLocalDataManager, File yamlFile) throws IOException { - // Create Yaml - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - // Compute Checksum and update SnapshotData - computeAndSetChecksum(yaml.get()); - // Write the SnapshotData with checksum to Yaml file. - YamlUtils.dump(yaml.get(), this, yamlFile, LOG); - } - } - - /** - * Creates a OmSnapshotLocalDataYaml instance from a YAML file. 
- * @param yamlFile The YAML file to read from - * @return A new OmSnapshotLocalDataYaml instance - * @throws IOException If there's an error reading the file - */ - public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotLocalDataManager snapshotLocalDataManager, - File yamlFile) throws IOException { - Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); - try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { - return getFromYamlStream(snapshotLocalDataManager, inputFileStream); - } - } - - /** - * Read the YAML content InputStream, and return OmSnapshotLocalDataYaml instance. - * @throws IOException - */ - public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotLocalDataManager snapshotLocalDataManager, - InputStream input) throws IOException { - OmSnapshotLocalDataYaml dataYaml; - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - dataYaml = yaml.get().load(input); - } catch (YAMLException ex) { - // Unchecked exception. Convert to IOException - throw new IOException(ex); - } - - if (dataYaml == null) { - // If Yaml#load returned null, then the file is empty. This is valid yaml - // but considered an error in this case since we have lost data about - // the snapshot. - throw new IOException("Failed to load snapshot file. File is empty."); - } - - return dataYaml; - } - /** * Factory class for constructing and pooling instances of the Yaml object. * This class extends BasePooledObjectFactory to support object pooling, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index fb6d7cf744a9..98536444a61c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,22 +19,21 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; -import com.google.common.graph.GraphBuilder; -import com.google.common.graph.MutableGraph; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Objects; -import java.util.UUID; -import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; /** @@ -43,15 +42,21 @@ */ public class OmSnapshotLocalDataManager implements AutoCloseable { - private final GenericObjectPool yamlPool; - private final MutableGraph localDataGraph; + private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); + + private final ObjectSerializer snapshotLocalDataSerializer; private final OMMetadataManager omMetadataManager; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) { - 
this.yamlPool = new GenericObjectPool(new OmSnapshotLocalDataYaml.YamlFactory()); - this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; - init(); + this.snapshotLocalDataSerializer = new YamlSerializer( + new OmSnapshotLocalDataYaml.YamlFactory()) { + + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; } /** @@ -84,77 +89,28 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf Path snapshotLocalDataPath = Paths.get( getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + OmSnapshotLocalData snapshotLocalDataYaml = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataYaml.writeToYaml(this, snapshotLocalDataPath.toFile()); + snapshotLocalDataSerializer.save(snapshotLocalDataPath.toFile(), snapshotLocalDataYaml); } - private void init() { - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - File snapshotDir = new File(store.getSnapshotsParentDir()); - for (File yamlFile : - Objects.requireNonNull(snapshotDir.listFiles( - (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)))) { - System.out.println(yamlFile.getAbsolutePath()); - } + public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { + Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotInfo)); + return snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); } - @Override - public void close() { - if (yamlPool != null) { - yamlPool.close(); - } + public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { + return snapshotLocalDataSerializer.load(snapshotDataPath); } - private final class VersionLocalDataNode { - private UUID snapshotId; - private int version; - private UUID previousSnapshotId; - private int previousSnapshotVersion; - - private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { - this.previousSnapshotId = previousSnapshotId; - this.previousSnapshotVersion = previousSnapshotVersion; - this.snapshotId = snapshotId; - this.version = version; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof VersionLocalDataNode)) { - return false; + @Override + public void close() { + if (snapshotLocalDataSerializer != null) { + try { + snapshotLocalDataSerializer.close(); + } catch (IOException e) { + LOG.error("Failed to close snapshot local data serializer", e); } - - VersionLocalDataNode that = (VersionLocalDataNode) o; - return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && - snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); - } - - @Override - public int hashCode() { - return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); - } - } - - public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { - try { - Yaml yaml = yamlPool.borrowObject(); - return new UncheckedAutoCloseableSupplier() { - - @Override - public void close() { - 
yamlPool.returnObject(yaml); - } - - @Override - public Yaml get() { - return yaml; - } - }; - } catch (Exception e) { - throw new IOException("Failed to get snapshot local yaml", e); } } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 71933f8112c4..23d332ae75b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -44,9 +44,10 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; -import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -60,26 +61,26 @@ public class TestOmSnapshotLocalDataYaml { private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - private static OmSnapshotLocalDataManager snapshotLocalDataManager; - private static final Yaml YAML = new OmSnapshotLocalDataYaml.YamlFactory().create(); - private static final UncheckedAutoCloseableSupplier YAML_SUPPLIER = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return YAML; - } - - @Override - public void close() { - - } - }; + private static final OmSnapshotLocalDataYaml.YamlFactory YAML_FACTORY = new OmSnapshotLocalDataYaml.YamlFactory(); + private static ObjectSerializer omSnapshotLocalDataSerializer; private static final Instant NOW = Instant.now(); @BeforeAll - public static void setupClassMocks() throws IOException { - snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); + public static void setupSerializer() throws IOException { + omSnapshotLocalDataSerializer = new YamlSerializer(YAML_FACTORY) { + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; + } + + @AfterAll + public static void cleanupSerializer() throws IOException { + if (omSnapshotLocalDataSerializer != null) { + omSnapshotLocalDataSerializer.close(); + } } @BeforeEach @@ -113,7 +114,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw createLiveFileMetaData("sst1", "table1", "k1", "k2"), createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); - OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(snapshotId, notDefraggedSSTFileList, + OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, previousSnapshotId); // Set version @@ -138,7 +139,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw File yamlFile = new File(testRoot, yamlFilePath); // Create YAML file with SnapshotData - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Check YAML file exists 
assertTrue(yamlFile.exists()); @@ -154,7 +155,7 @@ public void testWriteToYaml() throws IOException { UUID prevSnapId = yamlFilePrevIdPair.getRight(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Verify fields assertEquals(44, snapshotData.getVersion()); @@ -194,8 +195,8 @@ public void testUpdateSnapshotDataFile() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml dataYaml = - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData dataYaml = + omSnapshotLocalDataSerializer.load(yamlFile); // Update snapshot data dataYaml.setSstFiltered(false); @@ -204,10 +205,10 @@ public void testUpdateSnapshotDataFile() throws IOException { singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); // Write updated data back to file - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Read back the updated data - dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + dataYaml = omSnapshotLocalDataSerializer.load(yamlFile); // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); @@ -225,10 +226,9 @@ public void testEmptyFile() throws IOException { File emptyFile = new File(testRoot, "empty.yaml"); assertTrue(emptyFile.createNewFile()); - IOException ex = assertThrows(IOException.class, () -> - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, emptyFile)); + IOException ex = assertThrows(IOException.class, () -> omSnapshotLocalDataSerializer.load(emptyFile)); - assertThat(ex).hasMessageContaining("Failed to load snapshot file. File is empty."); + assertThat(ex).hasMessageContaining("Failed to load file. 
File is empty."); } @Test @@ -237,7 +237,7 @@ public void testChecksum() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Get the original checksum String originalChecksum = snapshotData.getChecksum(); @@ -245,7 +245,7 @@ public void testChecksum() throws IOException { // Verify the checksum is not null or empty assertThat(originalChecksum).isNotNull().isNotEmpty(); - assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(snapshotLocalDataManager, snapshotData)); + assertTrue(omSnapshotLocalDataSerializer.verifyChecksum(snapshotData)); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 62f9561d2b83..7f808df3f978 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -324,8 +324,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertTrue(Files.exists(snapshotYaml)); assertTrue(Files.size(snapshotYaml) > 0); // Verify the contents of the YAML file - OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, - snapshotYaml.toFile()); + OmSnapshotLocalData localData = snapshotLocalDataManager.getOmSnapshotLocalData(snapshotYaml.toFile()); assertNotNull(localData); assertEquals(0, localData.getVersion()); assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index ce24040a3eab..2cafae138fd4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -51,13 +50,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotCreateResponse. 
@@ -77,26 +74,12 @@ public void setup() throws Exception { String fsPath = folder.getAbsolutePath(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(snapshotLocalDataManager); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index f8d40951b2bf..2d5d7b2870f7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -44,12 +43,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotDeleteResponse. 
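The setup() hunk above and its twin below strip the hand-built Yaml supplier and stub only the manager chain. Reduced to its essentials, the wiring both tests converge on looks like the following; SnapshotResponseTestWiring is a hypothetical name used purely for illustration:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.OmSnapshotManager;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager;

    final class SnapshotResponseTestWiring {
      static OzoneManager mockOzoneManager(OzoneConfiguration conf) {
        OzoneManager ozoneManager = mock(OzoneManager.class);
        OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class);
        OmSnapshotLocalDataManager localDataManager = mock(OmSnapshotLocalDataManager.class);
        when(ozoneManager.getConfiguration()).thenReturn(conf);
        when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager);
        when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(localDataManager);
        // No Yaml-supplier stub is required any more: serialization is internal to
        // OmSnapshotLocalDataManager, so these tests never touch snakeyaml directly.
        return ozoneManager;
      }
    }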
@@ -69,26 +66,12 @@ public void setup() throws Exception { String fsPath = folder.toAbsolutePath().toString(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager omSnapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(omSnapshotLocalDataManager); - when(omSnapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } From e02670c2cb18d98a4cd60a3690ca6a7a358afe5f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:37:30 -0400 Subject: [PATCH 005/126] HDDS-13767. Fix pmd Change-Id: I32bcaf2a1fb290f1790c02872a0230cd65586636 --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index e82bad8832a5..5af678f903fb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -194,6 +194,7 @@ public void addVersionSSTFileInfos(List sstFiles, int previousSnaps * Returns the checksum of the YAML representation. * @return checksum */ + @Override public String getChecksum() { return checksum; } From 79580e9359f367e01fe33930369e0de2feb6edaf Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:50:57 -0400 Subject: [PATCH 006/126] HDDS-13627. Fix checkstyle Change-Id: I985170e38fb8beeb784048e85a08a4c79e1aec97 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 01d33a914556..1bfb0d30705e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; -import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import java.io.File; From afbc5928d74a72622dbf9089cacd4fb4364e4dac Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 10 Oct 2025 14:15:13 -0400 Subject: [PATCH 007/126] HDDS-13627. 
Add tests Change-Id: Id3f2c49050bc3476b9e0f5f51dacb6d9acc4c2f7 --- .../ozone/om/OmSnapshotLocalDataYaml.java | 3 +- .../hadoop/ozone/om/OmSnapshotManager.java | 7 + .../snapshot/OmSnapshotLocalDataManager.java | 124 +++++- .../TestOmSnapshotLocalDataManager.java | 372 ++++++++++++++++++ 4 files changed, 492 insertions(+), 14 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index 543c4c6397cc..a9e8266fbf89 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -165,7 +165,8 @@ public Object construct(Node node) { MappingNode mnode = (MappingNode) node; Map nodes = constructMapping(mnode); UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); - UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); + String prevNodeStr = (String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID); + UUID prevSnapId = prevNodeStr == null ? null : UUID.fromString(prevNodeStr); OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index ac59c43c0580..7b9beb80cf6f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -803,6 +803,13 @@ public static Path getSnapshotPath(OMMetadataManager omMetadataManager, Snapshot checkpointPrefix + snapshotInfo.getCheckpointDir()); } + public static Path getSnapshotPath(OMMetadataManager omMetadataManager, UUID snapshotId) { + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + return Paths.get(store.getSnapshotsParentDir(), + checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotId)); + } + public static String getSnapshotPath(OzoneConfiguration conf, SnapshotInfo snapshotInfo) { return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 1bfb0d30705e..c01b77189e3b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import java.io.File; @@ -26,8 +27,19 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import 
java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.Stack; import java.util.UUID; +import java.util.stream.Collectors; +import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -49,7 +61,8 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); private final ObjectSerializer snapshotLocalDataSerializer; - private final MutableGraph localDataGraph; + private final MutableGraph localDataGraph; + private final Map> versionNodeMap; private final OMMetadataManager omMetadataManager; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { @@ -63,9 +76,15 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO data.computeAndSetChecksum(yaml); } }; + this.versionNodeMap = new HashMap<>(); init(); } + @VisibleForTesting + Map> getVersionNodeMap() { + return versionNodeMap; + } + /** * Returns the path to the YAML file that stores local properties for the given snapshot. * @@ -83,7 +102,11 @@ public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) { * @return the path to the snapshot's local property YAML file */ public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) { - Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); + return getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId()); + } + + public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { + Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId); return getSnapshotLocalPropertyYamlPath(snapshotPath); } @@ -102,25 +125,101 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf } public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { - Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotInfo)); - return snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); + return getOmSnapshotLocalData(snapshotInfo.getSnapshotId()); + } + + public OmSnapshotLocalData getOmSnapshotLocalData(UUID snapshotId) throws IOException { + Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotId)); + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); + if (!Objects.equals(snapshotLocalData.getSnapshotId(), snapshotId)) { + throw new IOException("SnapshotId in path : " + snapshotLocalDataPath + " contains snapshotLocalData " + + "corresponding to snapshotId " + snapshotLocalData.getSnapshotId() + ". Expected snapshotId " + snapshotId); + } + return snapshotLocalData; } public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { return snapshotLocalDataSerializer.load(snapshotDataPath); } + private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { + return versionNodeMap.getOrDefault(snapshotId, Collections.emptyMap()).get(version); + } + + private void addVersionNode(LocalDataVersionNode versionNode) throws IOException { + if (getVersionNode(versionNode.snapshotId, versionNode.version) == null) { + LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? 
null : + getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); + if (versionNode.previousSnapshotId != null && previousVersionNode == null) { + throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + + "loaded"); + } + localDataGraph.addNode(versionNode); + if (previousVersionNode != null) { + localDataGraph.putEdge(versionNode, previousVersionNode); + } + versionNodeMap.computeIfAbsent(versionNode.snapshotId, k -> new HashMap<>()) + .put(versionNode.version, versionNode); + } + } + + private List getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { + UUID snapshotId = snapshotLocalData.getSnapshotId(); + UUID previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); + return snapshotLocalData.getVersionSstFileInfos().entrySet().stream() + .map(entry -> new LocalDataVersionNode(snapshotId, entry.getKey(), + previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())).collect(Collectors.toList()); + } + + public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { + if (versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { + return; + } + Set visitedSnapshotIds = new HashSet<>(); + Stack>> stack = new Stack<>(); + stack.push(Triple.of(snapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId(), + getVersionNodes(snapshotLocalData))); + while (!stack.isEmpty()) { + Triple> versionNodeToProcess = stack.peek(); + UUID snapId = versionNodeToProcess.getLeft(); + UUID prevSnapId = versionNodeToProcess.getMiddle(); + List versionNodes = versionNodeToProcess.getRight(); + if (visitedSnapshotIds.contains(snapId)) { + for (LocalDataVersionNode versionNode : versionNodes) { + addVersionNode(versionNode); + } + stack.pop(); + } else { + if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { + OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); + stack.push(Triple.of(prevSnapshotLocalData.getSnapshotId(), prevSnapshotLocalData.getPreviousSnapshotId(), + getVersionNodes(prevSnapshotLocalData))); + } + visitedSnapshotIds.add(snapId); + } + } + } + private void init() throws IOException { RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); - File[] yamlFiles = snapshotDir.listFiles( + File[] localDataFiles = snapshotDir.listFiles( (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)); - if (yamlFiles == null) { + if (localDataFiles == null) { throw new IOException("Error while listing yaml files inside directory: " + snapshotDir.getAbsolutePath()); } - for (File yamlFile : yamlFiles) { - System.out.println(yamlFile.getAbsolutePath()); + Arrays.sort(localDataFiles, Comparator.comparing(File::getName)); + for (File localDataFile : localDataFiles) { + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataSerializer.load(localDataFile); + File file = new File(getSnapshotLocalPropertyYamlPath(snapshotLocalData.getSnapshotId())); + String expectedPath = file.getAbsolutePath(); + String actualPath = localDataFile.getAbsolutePath(); + if (!expectedPath.equals(actualPath)) { + throw new IOException("Unexpected path for local data file with snapshotId:" + snapshotLocalData.getSnapshotId() + + " : " + actualPath + ". 
" + "Expected: " + expectedPath); + } + addVersionNodeWithDependents(snapshotLocalData); } } @@ -135,13 +234,13 @@ public void close() { } } - private static final class VersionLocalDataNode { + static final class LocalDataVersionNode { private UUID snapshotId; private int version; private UUID previousSnapshotId; private int previousSnapshotVersion; - private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { + private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { this.previousSnapshotId = previousSnapshotId; this.previousSnapshotVersion = previousSnapshotVersion; this.snapshotId = snapshotId; @@ -150,11 +249,11 @@ private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshot @Override public boolean equals(Object o) { - if (!(o instanceof VersionLocalDataNode)) { + if (!(o instanceof LocalDataVersionNode)) { return false; } - VersionLocalDataNode that = (VersionLocalDataNode) o; + LocalDataVersionNode that = (LocalDataVersionNode) o; return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); } @@ -164,5 +263,4 @@ public int hashCode() { return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); } } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java new file mode 100644 index 000000000000..13f19190a7ed --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -0,0 +1,372 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; +import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.util.YamlSerializer; +import org.apache.ozone.compaction.log.SstFileInfo; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.rocksdb.LiveFileMetaData; +import org.yaml.snakeyaml.Yaml; + +/** + * Test class for OmSnapshotLocalDataManager. 
+ */ +public class TestOmSnapshotLocalDataManager { + + private static YamlSerializer snapshotLocalDataYamlSerializer; + + @Mock + private OMMetadataManager omMetadataManager; + + @Mock + private RDBStore rdbStore; + + @Mock + private RDBStore snapshotStore; + + @TempDir + private Path tempDir; + + private OmSnapshotLocalDataManager localDataManager; + private AutoCloseable mocks; + + private File snapshotsDir; + private File dbLocation; + + @BeforeAll + public static void setupClass() { + snapshotLocalDataYamlSerializer = new YamlSerializer( + new OmSnapshotLocalDataYaml.YamlFactory()) { + + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; + } + + @AfterAll + public static void teardownClass() throws IOException { + snapshotLocalDataYamlSerializer.close(); + snapshotLocalDataYamlSerializer = null; + } + + @BeforeEach + public void setUp() throws IOException { + mocks = MockitoAnnotations.openMocks(this); + + // Setup mock behavior + when(omMetadataManager.getStore()).thenReturn(rdbStore); + + this.snapshotsDir = tempDir.resolve("snapshots").toFile(); + FileUtils.deleteDirectory(snapshotsDir); + snapshotsDir.mkdirs(); + dbLocation = tempDir.resolve("db").toFile(); + FileUtils.deleteDirectory(dbLocation); + dbLocation.mkdirs(); + + when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); + when(rdbStore.getDbLocation()).thenReturn(dbLocation); + } + + @AfterEach + public void tearDown() throws Exception { + if (localDataManager != null) { + localDataManager.close(); + } + if (mocks != null) { + mocks.close(); + } + } + + @Test + public void testConstructor() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + assertNotNull(localDataManager); + } + + @Test + public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOException { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); + assertNotNull(yamlPath); + Path expectedYamlPath = Paths.get(snapshotsDir.getAbsolutePath(), "db" + OM_SNAPSHOT_SEPARATOR + snapshotId + + YAML_FILE_EXTENSION); + assertEquals(expectedYamlPath.toAbsolutePath().toString(), yamlPath.getAbsolutePath()); + } + + @Test + public void testCreateNewOmSnapshotLocalDataFile() throws IOException { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); + + // Setup snapshot store mock + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + snapshotDbLocation.mkdirs(); + List sstFiles = new ArrayList<>(); + sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); + sstFiles.add(createMockLiveFileMetaData("file3.sst", FILE_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file4.sst", FILE_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file5.sst", DIRECTORY_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file6.sst", "colFamily1", "key1", "key7")); + List sstFileInfos = IntStream.range(0, sstFiles.size() - 1) + .mapToObj(sstFiles::get).map(SstFileInfo::new).collect(Collectors.toList()); + 
when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); + RocksDatabase rocksDatabase = mock(RocksDatabase.class); + when(snapshotStore.getDb()).thenReturn(rocksDatabase); + when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); + + // Verify file was created + OmSnapshotLocalData snapshotLocalData = localDataManager.getOmSnapshotLocalData(snapshotId); + assertEquals(1, snapshotLocalData.getVersionSstFileInfos().size()); + OmSnapshotLocalData.VersionMeta versionMeta = snapshotLocalData.getVersionSstFileInfos().get(0); + OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); + assertEquals(expectedVersionMeta, versionMeta); + } + + @Test + public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); + + // Create and write snapshot local data file + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + // Write the file manually for testing + Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); + writeLocalDataToFile(localData, yamlPath); + + // Test retrieval + OmSnapshotLocalData retrieved = localDataManager.getOmSnapshotLocalData(snapshotInfo); + + assertNotNull(retrieved); + assertEquals(snapshotId, retrieved.getSnapshotId()); + } + + @Test + public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOException { + UUID snapshotId = UUID.randomUUID(); + UUID wrongSnapshotId = UUID.randomUUID(); + + // Create local data with wrong snapshot ID + OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); + writeLocalDataToFile(localData, yamlPath); + // Should throw IOException due to mismatched IDs + assertThrows(IOException.class, () -> { + localDataManager.getOmSnapshotLocalData(snapshotId); + }); + } + + @Test + public void testGetOmSnapshotLocalDataWithFile() throws IOException { + UUID snapshotId = UUID.randomUUID(); + + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + Path yamlPath = tempDir.resolve("test-snapshot.yaml"); + writeLocalDataToFile(localData, yamlPath); + + OmSnapshotLocalData retrieved = localDataManager + .getOmSnapshotLocalData(yamlPath.toFile()); + + assertNotNull(retrieved); + assertEquals(snapshotId, retrieved.getSnapshotId()); + } + + @Test + public void testAddVersionNodeWithDependents() throws IOException { + List versionIds = Stream.of(UUID.randomUUID(), UUID.randomUUID()) + .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); + UUID snapshotId = versionIds.get(0); + UUID previousSnapshotId = versionIds.get(1); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + // Create snapshot directory structure and files + createSnapshotLocalDataFile(snapshotId, previousSnapshotId); + createSnapshotLocalDataFile(previousSnapshotId, null); + OmSnapshotLocalData localData = createMockLocalData(snapshotId, previousSnapshotId); + + // 
Should not throw exception + localDataManager.addVersionNodeWithDependents(localData); + } + + @Test + public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { + UUID snapshotId = UUID.randomUUID(); + + createSnapshotLocalDataFile(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + + // First addition + localDataManager.addVersionNodeWithDependents(localData); + + // Second addition - should handle gracefully + localDataManager.addVersionNodeWithDependents(localData); + } + + @Test + public void testInitWithExistingYamlFiles() throws IOException { + List versionIds = Stream.of(UUID.randomUUID(), UUID.randomUUID()) + .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); + UUID snapshotId = versionIds.get(0); + UUID previousSnapshotId = versionIds.get(1); + + createSnapshotLocalDataFile(previousSnapshotId, null); + createSnapshotLocalDataFile(snapshotId, previousSnapshotId); + + // Initialize - should load existing files + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + assertNotNull(localDataManager); + Map> versionMap = + localDataManager.getVersionNodeMap(); + assertEquals(2, versionMap.size()); + assertEquals(versionMap.keySet(), new HashSet<>(versionIds)); + } + + @Test + public void testInitWithInvalidPathThrowsException() throws IOException { + UUID snapshotId = UUID.randomUUID(); + + // Create a file with wrong location + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + Path wrongPath = Paths.get(snapshotsDir.getAbsolutePath(), "db-wrong-name.yaml"); + writeLocalDataToFile(localData, wrongPath); + + // Should throw IOException during init + assertThrows(IOException.class, () -> { + new OmSnapshotLocalDataManager(omMetadataManager); + }); + } + + @Test + public void testClose() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + // Should not throw exception + localDataManager.close(); + } + + // Helper methods + + private SnapshotInfo createMockSnapshotInfo(UUID snapshotId, UUID previousSnapshotId) { + SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() + .setSnapshotId(snapshotId) + .setName("snapshot-" + snapshotId); + + if (previousSnapshotId != null) { + builder.setPathPreviousSnapshotId(previousSnapshotId); + } + + return builder.build(); + } + + private LiveFileMetaData createMockLiveFileMetaData(String fileName, String columnFamilyName, String smallestKey, + String largestKey) { + LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); + when(liveFileMetaData.columnFamilyName()).thenReturn(StringUtils.string2Bytes(columnFamilyName)); + when(liveFileMetaData.fileName()).thenReturn(fileName); + when(liveFileMetaData.smallestKey()).thenReturn(StringUtils.string2Bytes(smallestKey)); + when(liveFileMetaData.largestKey()).thenReturn(StringUtils.string2Bytes(largestKey)); + return liveFileMetaData; + } + + private OmSnapshotLocalData createMockLocalData(UUID snapshotId, UUID previousSnapshotId) { + List sstFiles = new ArrayList<>(); + sstFiles.add(createMockLiveFileMetaData("file1.sst", "columnFamily1", "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file2.sst", "columnFamily1", "key3", "key10")); + sstFiles.add(createMockLiveFileMetaData("file3.sst", "columnFamily2", "key1", "key8")); + sstFiles.add(createMockLiveFileMetaData("file4.sst", "columnFamily2", "key0", "key10")); + return new 
OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId); + } + + private void createSnapshotLocalDataFile(UUID snapshotId, UUID previousSnapshotId) + throws IOException { + OmSnapshotLocalData localData = createMockLocalData(snapshotId, previousSnapshotId); + + String fileName = "db" + OM_SNAPSHOT_SEPARATOR + snapshotId.toString() + YAML_FILE_EXTENSION; + Path yamlPath = Paths.get(snapshotsDir.getAbsolutePath(), fileName); + + writeLocalDataToFile(localData, yamlPath); + } + + private void writeLocalDataToFile(OmSnapshotLocalData localData, Path filePath) + throws IOException { + // This is a simplified version - in real implementation, + // you would use the YamlSerializer + snapshotLocalDataYamlSerializer.save(filePath.toFile(), localData); + } +} From 70ac2c7ac04ee126c002e0e1df371a66cada19f9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 11 Oct 2025 20:40:53 -0400 Subject: [PATCH 008/126] HDDS-13783. Implement locks for OmSnapshotLocalDataManager Change-Id: I432960725b4c6c55aa906b5780cc3027e41e10db --- .../hadoop/ozone/om/OmSnapshotManager.java | 3 +- .../snapshot/OmSnapshotLocalDataManager.java | 422 ++++++++++++++++-- .../TestOmSnapshotLocalDataManager.java | 48 +- 3 files changed, 415 insertions(+), 58 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 7b9beb80cf6f..743c1e584e25 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -196,7 +196,8 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { - this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), + ozoneManager.getConfiguration()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index c01b77189e3b..c9715cf30320 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,29 +17,40 @@ package org.apache.hadoop.ozone.om.snapshot; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; +import com.google.common.util.concurrent.Striped; import java.io.File; import 
java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.Stack; import java.util.UUID; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.function.Function; import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -48,6 +59,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; +import org.apache.ratis.util.function.CheckedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; @@ -59,13 +71,16 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); + private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock"; private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; private final Map> versionNodeMap; private final OMMetadataManager omMetadataManager; + private Striped locks; - public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, + OzoneConfiguration configuration) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; this.snapshotLocalDataSerializer = new YamlSerializer( @@ -77,7 +92,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } }; this.versionNodeMap = new HashMap<>(); - init(); + init(configuration); } @VisibleForTesting @@ -116,28 +131,39 @@ public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { * @param snapshotInfo snapshot info instance corresponding to snapshot. 
*/ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInfo snapshotInfo) throws IOException { - Path snapshotLocalDataPath = Paths.get( - getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); - Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalData snapshotLocalDataYaml = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataSerializer.save(snapshotLocalDataPath.toFile(), snapshotLocalDataYaml); + try (WritableOmSnapshotLocalDataProvider snapshotLocalData = + new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), + () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()), + null))) { + snapshotLocalData.commit(); + } } - public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { return getOmSnapshotLocalData(snapshotInfo.getSnapshotId()); } - public OmSnapshotLocalData getOmSnapshotLocalData(UUID snapshotId) throws IOException { - Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotId)); - OmSnapshotLocalData snapshotLocalData = snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); - if (!Objects.equals(snapshotLocalData.getSnapshotId(), snapshotId)) { - throw new IOException("SnapshotId in path : " + snapshotLocalDataPath + " contains snapshotLocalData " + - "corresponding to snapshotId " + snapshotLocalData.getSnapshotId() + ". Expected snapshotId " + snapshotId); - } + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId) throws IOException { + ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId); return snapshotLocalData; } + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(SnapshotInfo snapshotInfo) + throws IOException { + return getWritableOmSnapshotLocalData(snapshotInfo.getSnapshotId(), snapshotInfo.getPathPreviousSnapshotId()); + } + + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(UUID snapshotId, UUID previousSnapshotId) + throws IOException { + return new WritableOmSnapshotLocalDataProvider(snapshotId, previousSnapshotId); + } + + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(UUID snapshotId) + throws IOException { + return new WritableOmSnapshotLocalDataProvider(snapshotId); + } + public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { return snapshotLocalDataSerializer.load(snapshotDataPath); } @@ -148,12 +174,9 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { private void addVersionNode(LocalDataVersionNode versionNode) throws IOException { if (getVersionNode(versionNode.snapshotId, versionNode.version) == null) { + validateVersionAddition(versionNode); LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? 
null : getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); - if (versionNode.previousSnapshotId != null && previousVersionNode == null) { - throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + - "loaded"); - } localDataGraph.addNode(versionNode); if (previousVersionNode != null) { localDataGraph.putEdge(versionNode, previousVersionNode); @@ -163,12 +186,13 @@ private void addVersionNode(LocalDataVersionNode versionNode) throws IOException } } - private List getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { + private Map getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { UUID snapshotId = snapshotLocalData.getSnapshotId(); UUID previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); return snapshotLocalData.getVersionSstFileInfos().entrySet().stream() .map(entry -> new LocalDataVersionNode(snapshotId, entry.getKey(), - previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())).collect(Collectors.toList()); + previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())) + .collect(Collectors.toMap(LocalDataVersionNode::getVersion, Function.identity())); } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -176,14 +200,14 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) return; } Set visitedSnapshotIds = new HashSet<>(); - Stack>> stack = new Stack<>(); + Stack>> stack = new Stack<>(); stack.push(Triple.of(snapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(snapshotLocalData))); + getVersionNodes(snapshotLocalData).values())); while (!stack.isEmpty()) { - Triple> versionNodeToProcess = stack.peek(); + Triple> versionNodeToProcess = stack.peek(); UUID snapId = versionNodeToProcess.getLeft(); UUID prevSnapId = versionNodeToProcess.getMiddle(); - List versionNodes = versionNodeToProcess.getRight(); + Collection versionNodes = versionNodeToProcess.getRight(); if (visitedSnapshotIds.contains(snapId)) { for (LocalDataVersionNode versionNode : versionNodes) { addVersionNode(versionNode); @@ -191,16 +215,22 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) stack.pop(); } else { if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { - OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(prevSnapId)); + OmSnapshotLocalData prevSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); stack.push(Triple.of(prevSnapshotLocalData.getSnapshotId(), prevSnapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(prevSnapshotLocalData))); + getVersionNodes(prevSnapshotLocalData).values())); + } visitedSnapshotIds.add(snapId); } } } - private void init() throws IOException { + private void init(OzoneConfiguration configuration) throws IOException { + boolean fair = configuration.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); + String stripeSizeKey = OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX + SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME; + int size = configuration.getInt(stripeSizeKey, OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT); + this.locks = SimpleStriped.readWriteLock(size, fair); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new 
File(store.getSnapshotsParentDir()); @@ -223,6 +253,59 @@ private void init() throws IOException { } } + private void validateVersionRemoval(UUID snapshotId, int version) throws IOException { + LocalDataVersionNode versionNode = getVersionNode(snapshotId, version); + if (versionNode != null && localDataGraph.inDegree(versionNode) != 0) { + Set versionNodes = localDataGraph.predecessors(versionNode); + throw new IOException(String.format("Cannot remove Snapshot %s with version : %d since it still has " + + "predecessors : %s", snapshotId, version, versionNodes)); + } + } + + private void validateVersionAddition(LocalDataVersionNode versionNode) throws IOException { + LocalDataVersionNode previousVersionNode = getVersionNode(versionNode.previousSnapshotId, + versionNode.previousSnapshotVersion); + if (versionNode.previousSnapshotId != null && previousVersionNode == null) { + throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + + "loaded"); + } + } + + private Map validateModification(OmSnapshotLocalData snapshotLocalData) + throws IOException { + Map versionNodes = getVersionNodes(snapshotLocalData); + for (LocalDataVersionNode node : versionNodes.values()) { + validateVersionAddition(node); + } + Map snapVersionNodeMap = + getVersionNodeMap().getOrDefault(snapshotLocalData.getSnapshotId(), Collections.emptyMap()); + for (Map.Entry entry : snapVersionNodeMap.entrySet()) { + if (!versionNodes.containsKey(entry.getKey())) { + validateVersionRemoval(snapshotLocalData.getSnapshotId(), entry.getKey()); + } + } + return versionNodes; + } + + private void upsertNode(UUID snapshotId, Map versionNodes) throws IOException { + Map existingVersions = getVersionNodeMap().getOrDefault(snapshotId, + Collections.emptyMap()); + getVersionNodeMap().remove(snapshotId); + for (Map.Entry entry : versionNodes.entrySet()) { + addVersionNode(entry.getValue()); + if (existingVersions.containsKey(entry.getKey())) { + for (LocalDataVersionNode predecessor : + localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } + for (LocalDataVersionNode localDataVersionNode : existingVersions.values()) { + localDataGraph.removeNode(localDataVersionNode); + } + getVersionNodeMap().put(snapshotId, versionNodes); + } + @Override public void close() { if (snapshotLocalDataSerializer != null) { @@ -234,11 +317,251 @@ public void close() { } } + /** + * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the + * access and initialization of local snapshot data in a thread-safe manner. + * It provides mechanisms to handle snapshot data, retrieve associated previous + * snapshot data, and manage lock synchronization for safe concurrent operations. + * + * This class works with snapshot identifiers and ensures that the appropriate + * local data for a given snapshot is loaded and accessible. Additionally, it + * maintains locking mechanisms to ensure thread-safe initialization and access + * to both the current and previous snapshot local data. The implementation also + * supports handling errors in the snapshot data initialization process. + * + * Key Functionalities: + * - Initializes and provides access to snapshot local data associated with a + * given snapshot identifier. + * - Resolves and retrieves data for the previous snapshot if applicable. + * - Ensures safe concurrent read operations using locking mechanisms. 
+ * - Validates the integrity and consistency of snapshot data during initialization. + * - Ensures that appropriate locks are released upon closing. + * + * Thread-Safety: + * This class utilizes locks to guarantee thread-safe operations when accessing + * or modifying snapshot data. State variables relating to snapshot data are + * properly synchronized to ensure consistency during concurrent operations. + * + * Usage Considerations: + * - Ensure proper handling of exceptions while interacting with this class, + * particularly during initialization and cleanup. + * - Always invoke the {@code close()} method after usage to release acquired locks + * and avoid potential deadlocks. + */ + public class ReadableOmSnapshotLocalDataProvider implements AutoCloseable { + + private final UUID snapshotId; + private final Lock lock; + private final OmSnapshotLocalData snapshotLocalData; + private OmSnapshotLocalData previousSnapshotLocalData; + private volatile boolean isPreviousSnapshotLoaded = false; + private final UUID resolvedPreviousSnapshotId; + + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { + this(snapshotId, locks.get(snapshotId).readLock()); + } + + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock) throws IOException { + this(snapshotId, lock, null, null); + } + + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock, + CheckedSupplier, IOException> snapshotLocalDataSupplier, + UUID snapshotIdToBeResolved) throws IOException { + this.snapshotId = snapshotId; + this.lock = lock; + Pair pair = initialize(lock, snapshotId, snapshotIdToBeResolved, + snapshotLocalDataSupplier); + this.snapshotLocalData = pair.getKey(); + this.resolvedPreviousSnapshotId = pair.getValue(); + this.previousSnapshotLocalData = null; + this.isPreviousSnapshotLoaded = false; + } + + public OmSnapshotLocalData getSnapshotLocalData() { + return snapshotLocalData; + } + + public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { + if (!isPreviousSnapshotLoaded) { + synchronized (this) { + if (!isPreviousSnapshotLoaded) { + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : + snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + this.isPreviousSnapshotLoaded = true; + } + } + } + return previousSnapshotLocalData; + } + + private Pair initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, + CheckedSupplier, IOException> snapshotLocalDataSupplier) + throws IOException { + snapIdLock.lock(); + ReadWriteLock lockIdAcquired = locks.get(snapId); + ReadWriteLock previousReadLockAcquired = null; + boolean haspreviousReadLockAcquiredAcquired = false; + try { + snapshotLocalDataSupplier = snapshotLocalDataSupplier == null ? () -> { + File snapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(snapId)); + return Pair.of(snapshotLocalDataSerializer.load(snapshotLocalDataFile), snapshotLocalDataFile); + } : snapshotLocalDataSupplier; + Pair pair = snapshotLocalDataSupplier.get(); + OmSnapshotLocalData ssLocalData = pair.getKey(); + if (!Objects.equals(ssLocalData.getSnapshotId(), snapId)) { + String loadPath = pair.getValue() == null ? null : pair.getValue().getAbsolutePath(); + throw new IOException("SnapshotId in path : " + loadPath + " contains snapshotLocalData corresponding " + + "to snapshotId " + ssLocalData.getSnapshotId() + ". 
Expected snapshotId " + snapId); + } + + UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); + if (previousSnapshotId != null) { + if (!versionNodeMap.containsKey(previousSnapshotId)) { + throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); + } + toResolveSnapshotId = toResolveSnapshotId == null ? ssLocalData.getPreviousSnapshotId() : + toResolveSnapshotId; + previousReadLockAcquired = locks.get(previousSnapshotId); + if (lockIdAcquired == previousReadLockAcquired) { + previousReadLockAcquired = null; + } + if (previousReadLockAcquired != null) { + previousReadLockAcquired.readLock().lock(); + haspreviousReadLockAcquiredAcquired = true; + } + Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId); + UUID currentIteratedSnapshotId = previousSnapshotId; + while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { + Set previousIds = + previousVersionNodeMap.values().stream().map(LocalDataVersionNode::getPreviousSnapshotId) + .collect(Collectors.toSet()); + if (previousIds.size() > 1) { + throw new IOException(String.format("Snapshot %s versions have multiple previous snapshotIds %s", + currentIteratedSnapshotId, previousIds)); + } + if (previousIds.isEmpty()) { + throw new IOException(String.format("Snapshot %s versions don't have a previous Id thus snapshot " + + "%s cannot be resolved against id %s", + currentIteratedSnapshotId, snapId, toResolveSnapshotId)); + } + UUID previousId = previousIds.iterator().next(); + ReadWriteLock lockToBeAcquired = locks.get(previousId); + if (lockToBeAcquired == lockIdAcquired) { + lockToBeAcquired = null; + } + if (lockToBeAcquired != null) { + if (lockToBeAcquired != previousReadLockAcquired) { + lockToBeAcquired.readLock().lock(); + haspreviousReadLockAcquiredAcquired = true; + } else { + previousReadLockAcquired = null; + } + } + try { + for (Map.Entry entry : previousVersionNodeMap.entrySet()) { + Set versionNode = localDataGraph.successors(entry.getValue()); + if (versionNode.size() > 1) { + throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", + currentIteratedSnapshotId, entry.getKey(), versionNode)); + } + entry.setValue(versionNode.iterator().next()); + } + } finally { + if (previousReadLockAcquired != null) { + previousReadLockAcquired.readLock().unlock(); + } + previousReadLockAcquired = lockToBeAcquired; + currentIteratedSnapshotId = previousId; + } + } + ssLocalData.setPreviousSnapshotId(toResolveSnapshotId); + Map versionMetaMap = ssLocalData.getVersionSstFileInfos(); + for (Map.Entry entry : versionMetaMap.entrySet()) { + OmSnapshotLocalData.VersionMeta versionMeta = entry.getValue(); + LocalDataVersionNode relativePreviousVersionNode = + previousVersionNodeMap.get(versionMeta.getPreviousSnapshotVersion()); + if (relativePreviousVersionNode == null) { + throw new IOException(String.format("Unable to resolve previous version node for snapshot: %s" + + " with version : %d against previous snapshot %s previous version : %d", + snapId, entry.getKey(), toResolveSnapshotId, versionMeta.getPreviousSnapshotVersion())); + } + } + } else { + toResolveSnapshotId = null; + } + return Pair.of(ssLocalData, toResolveSnapshotId); + } catch (IOException e) { + if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { + previousReadLockAcquired.readLock().unlock(); + } + snapIdLock.unlock(); + throw e; + } + } + + @Override + public void 
close() { + if (resolvedPreviousSnapshotId != null) { + locks.get(resolvedPreviousSnapshotId).readLock().unlock(); + } + lock.unlock(); + } + } + + /** + * This class represents a writable provider for managing local data of + * OmSnapshot. It extends the functionality of {@code ReadableOmSnapshotLocalDataProvider} + * and provides support for write operations, such as committing changes. + * + * The writable snapshot data provider interacts with version nodes and + * facilitates atomic updates to snapshot properties and files. + * + * This class is designed to ensure thread-safe operations and uses locks to + * guarantee consistent state across concurrent activities. + * + * The default usage includes creating an instance of this provider with + * specific snapshot identifiers and optionally handling additional parameters + * such as data resolution or a supplier for snapshot data. + */ + public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapshotLocalDataProvider { + + private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { + super(snapshotId, locks.get(snapshotId).writeLock()); + } + + private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { + super(snapshotId, locks.get(snapshotId).writeLock(), null, snapshotIdToBeResolved); + } + + private WritableOmSnapshotLocalDataProvider(UUID snapshotId, + CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { + super(snapshotId, locks.get(snapshotId).writeLock(), snapshotLocalDataSupplier, null); + } + + public synchronized void commit() throws IOException { + Map localDataVersionNodes = validateModification(super.snapshotLocalData); + String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); + String tmpFilePath = filePath + ".tmp"; + File tmpFile = new File(tmpFilePath); + if (tmpFile.exists()) { + tmpFile.delete(); + } + snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); + FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + upsertNode(super.snapshotId, localDataVersionNodes); + + } + } + static final class LocalDataVersionNode { - private UUID snapshotId; - private int version; - private UUID previousSnapshotId; - private int previousSnapshotVersion; + private final UUID snapshotId; + private final int version; + private final UUID previousSnapshotId; + private final int previousSnapshotVersion; private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { this.previousSnapshotId = previousSnapshotId; @@ -247,12 +570,27 @@ private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshot this.version = version; } + private int getVersion() { + return version; + } + + private UUID getSnapshotId() { + return snapshotId; + } + + private UUID getPreviousSnapshotId() { + return previousSnapshotId; + } + + private int getPreviousSnapshotVersion() { + return previousSnapshotVersion; + } + @Override public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { return false; } - LocalDataVersionNode that = (LocalDataVersionNode) o; return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); @@ -262,5 +600,15 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(snapshotId, 
version, previousSnapshotId, previousSnapshotVersion); } + + @Override + public String toString() { + return "LocalDataVersionNode{" + + "snapshotId=" + snapshotId + + ", version=" + version + + ", previousSnapshotId=" + previousSnapshotId + + ", previousSnapshotVersion=" + previousSnapshotVersion + + '}'; + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 13f19190a7ed..cc3b758b8451 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -43,6 +43,7 @@ import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -50,6 +51,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; import org.junit.jupiter.api.AfterAll; @@ -70,6 +72,8 @@ public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; + private static OzoneConfiguration conf; + @Mock private OMMetadataManager omMetadataManager; @@ -90,6 +94,7 @@ public class TestOmSnapshotLocalDataManager { @BeforeAll public static void setupClass() { + conf = new OzoneConfiguration(); snapshotLocalDataYamlSerializer = new YamlSerializer( new OmSnapshotLocalDataYaml.YamlFactory()) { @@ -136,7 +141,7 @@ public void tearDown() throws Exception { @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); assertNotNull(localDataManager); } @@ -145,7 +150,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -175,16 +180,19 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); // Verify file was created - OmSnapshotLocalData snapshotLocalData = localDataManager.getOmSnapshotLocalData(snapshotId); - assertEquals(1, snapshotLocalData.getVersionSstFileInfos().size()); - 
OmSnapshotLocalData.VersionMeta versionMeta = snapshotLocalData.getVersionSstFileInfos().get(0); - OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); - assertEquals(expectedVersionMeta, versionMeta); + OmSnapshotLocalData.VersionMeta versionMeta; + try (ReadableOmSnapshotLocalDataProvider snapshotLocalData = localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertEquals(1, snapshotLocalData.getSnapshotLocalData().getVersionSstFileInfos().size()); + versionMeta = snapshotLocalData.getSnapshotLocalData().getVersionSstFileInfos().get(0); + OmSnapshotLocalData.VersionMeta expectedVersionMeta = + new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); + assertEquals(expectedVersionMeta, versionMeta); + } } @Test @@ -195,17 +203,17 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); writeLocalDataToFile(localData, yamlPath); // Test retrieval - OmSnapshotLocalData retrieved = localDataManager.getOmSnapshotLocalData(snapshotInfo); - - assertNotNull(retrieved); - assertEquals(snapshotId, retrieved.getSnapshotId()); + try (ReadableOmSnapshotLocalDataProvider retrieved = localDataManager.getOmSnapshotLocalData(snapshotInfo)) { + assertNotNull(retrieved.getSnapshotLocalData()); + assertEquals(snapshotId, retrieved.getSnapshotLocalData().getSnapshotId()); + } } @Test @@ -216,7 +224,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -232,7 +240,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -250,7 +258,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); // Create snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -266,7 +274,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); 
OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -288,7 +296,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); assertNotNull(localDataManager); Map> versionMap = @@ -308,13 +316,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException { // Should throw IOException during init assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager); + new OmSnapshotLocalDataManager(omMetadataManager, conf); }); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); // Should not throw exception localDataManager.close(); From b554cc7a2ab6a49659b25dfe7d2dbe829819fa46 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 11 Oct 2025 20:49:53 -0400 Subject: [PATCH 009/126] HDDS-13783. Implement locks for OmSnapshotLocalDataManager Change-Id: I3c5514e5bbd251a2b5297d8f074cfde5c71fa543 --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index c9715cf30320..8018b94fecf0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -149,6 +149,13 @@ public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotI return snapshotLocalData; } + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId, UUID previousSnapshotID) + throws IOException { + ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId, + previousSnapshotID); + return snapshotLocalData; + } + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { return getWritableOmSnapshotLocalData(snapshotInfo.getSnapshotId(), snapshotInfo.getPathPreviousSnapshotId()); @@ -361,6 +368,10 @@ protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOExceptio this(snapshotId, locks.get(snapshotId).readLock()); } + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapIdToResolve) throws IOException { + this(snapshotId, locks.get(snapshotId).readLock(), null, snapIdToResolve); + } + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock) throws IOException { this(snapshotId, lock, null, null); } From 49eccfac45c62092e775bdccb589381b978cee8f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 11 Oct 2025 20:51:14 -0400 Subject: [PATCH 010/126] HDDS-13783. 
Refactor inline variable Change-Id: Ib5a9e6c91bdccba17820263c47eaf2c8400e930d --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 8018b94fecf0..d384ab164834 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -145,15 +145,12 @@ public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(SnapshotInfo s } public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId) throws IOException { - ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId); - return snapshotLocalData; + return new ReadableOmSnapshotLocalDataProvider(snapshotId); } public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId, UUID previousSnapshotID) throws IOException { - ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId, - previousSnapshotID); - return snapshotLocalData; + return new ReadableOmSnapshotLocalDataProvider(snapshotId, previousSnapshotID); } public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(SnapshotInfo snapshotInfo) From 51eda04dc47976fa1114567ba1b14327cf14fb57 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 07:22:40 -0400 Subject: [PATCH 011/126] HDDS-13627. Refactor map data structure Change-Id: Ica36e0615c7bc6aa9b6a7f6fafafd0f830d4bafb --- .../snapshot/OmSnapshotLocalDataManager.java | 113 ++++++++++++------ .../TestOmSnapshotLocalDataManager.java | 2 +- 2 files changed, 75 insertions(+), 40 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index c01b77189e3b..75611955b6f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -28,21 +28,19 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.Stack; import java.util.UUID; -import java.util.stream.Collectors; -import org.apache.commons.lang3.tuple.Triple; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -62,7 +60,7 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final ObjectSerializer snapshotLocalDataSerializer; private final 
MutableGraph localDataGraph; - private final Map> versionNodeMap; + private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { @@ -81,7 +79,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } @VisibleForTesting - Map> getVersionNodeMap() { + Map getVersionNodeMap() { return versionNodeMap; } @@ -143,32 +141,34 @@ public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws } private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { - return versionNodeMap.getOrDefault(snapshotId, Collections.emptyMap()).get(version); + if (!versionNodeMap.containsKey(snapshotId)) { + return null; + } + return versionNodeMap.get(snapshotId).getVersionNode(version); } - private void addVersionNode(LocalDataVersionNode versionNode) throws IOException { - if (getVersionNode(versionNode.snapshotId, versionNode.version) == null) { - LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : - getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); - if (versionNode.previousSnapshotId != null && previousVersionNode == null) { - throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + - "loaded"); - } - localDataGraph.addNode(versionNode); - if (previousVersionNode != null) { - localDataGraph.putEdge(versionNode, previousVersionNode); + private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) + throws IOException { + if (!versionNodeMap.containsKey(snapshotId)) { + for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { + if (getVersionNode(versionNode.snapshotId, versionNode.version) != null) { + throw new IOException("Unable to add " + versionNode + " since it already exists"); + } + LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? 
null : + getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); + if (versionNode.previousSnapshotId != null && previousVersionNode == null) { + throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + + "loaded"); + } + localDataGraph.addNode(versionNode); + if (previousVersionNode != null) { + localDataGraph.putEdge(versionNode, previousVersionNode); + } } - versionNodeMap.computeIfAbsent(versionNode.snapshotId, k -> new HashMap<>()) - .put(versionNode.version, versionNode); + versionNodeMap.put(snapshotId, snapshotVersionsMeta); + return true; } - } - - private List getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { - UUID snapshotId = snapshotLocalData.getSnapshotId(); - UUID previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); - return snapshotLocalData.getVersionSstFileInfos().entrySet().stream() - .map(entry -> new LocalDataVersionNode(snapshotId, entry.getKey(), - previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())).collect(Collectors.toList()); + return false; } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -176,24 +176,20 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) return; } Set visitedSnapshotIds = new HashSet<>(); - Stack>> stack = new Stack<>(); - stack.push(Triple.of(snapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(snapshotLocalData))); + Stack> stack = new Stack<>(); + stack.push(Pair.of(snapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(snapshotLocalData))); while (!stack.isEmpty()) { - Triple> versionNodeToProcess = stack.peek(); + Pair versionNodeToProcess = stack.peek(); UUID snapId = versionNodeToProcess.getLeft(); - UUID prevSnapId = versionNodeToProcess.getMiddle(); - List versionNodes = versionNodeToProcess.getRight(); + SnapshotVersionsMeta snapshotVersionsMeta = versionNodeToProcess.getRight(); if (visitedSnapshotIds.contains(snapId)) { - for (LocalDataVersionNode versionNode : versionNodes) { - addVersionNode(versionNode); - } + addSnapshotVersionMeta(snapId, snapshotVersionsMeta); stack.pop(); } else { + UUID prevSnapId = snapshotVersionsMeta.getPreviousSnapshotId(); if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); - stack.push(Triple.of(prevSnapshotLocalData.getSnapshotId(), prevSnapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(prevSnapshotLocalData))); + stack.push(Pair.of(prevSnapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(prevSnapshotLocalData))); } visitedSnapshotIds.add(snapId); } @@ -263,4 +259,43 @@ public int hashCode() { return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); } } + + static final class SnapshotVersionsMeta { + private final UUID previousSnapshotId; + private final Map snapshotVersions; + private int version; + + private SnapshotVersionsMeta(OmSnapshotLocalData snapshotLocalData) { + this.previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); + this.snapshotVersions = getVersionNodes(snapshotLocalData); + this.version = snapshotLocalData.getVersion(); + } + + private Map getVersionNodes(OmSnapshotLocalData snapshotLocalData) { + UUID snapshotId = snapshotLocalData.getSnapshotId(); + UUID prevSnapshotId = snapshotLocalData.getPreviousSnapshotId(); + Map versionNodes = new HashMap<>(); + for 
(Map.Entry entry : snapshotLocalData.getVersionSstFileInfos().entrySet()) { + versionNodes.put(entry.getKey(), new LocalDataVersionNode(snapshotId, entry.getKey(), + prevSnapshotId, entry.getValue().getPreviousSnapshotVersion())); + } + return versionNodes; + } + + UUID getPreviousSnapshotId() { + return previousSnapshotId; + } + + int getVersion() { + return version; + } + + Map getSnapshotVersions() { + return snapshotVersions; + } + + LocalDataVersionNode getVersionNode(int snapshotVersion) { + return snapshotVersions.get(snapshotVersion); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 13f19190a7ed..d0ac0ad19bcb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -291,7 +291,7 @@ public void testInitWithExistingYamlFiles() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); assertNotNull(localDataManager); - Map> versionMap = + Map versionMap = localDataManager.getVersionNodeMap(); assertEquals(2, versionMap.size()); assertEquals(versionMap.keySet(), new HashSet<>(versionIds)); From 96689fafc305eadd65605d48676e1befbbe3da77 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 08:01:37 -0400 Subject: [PATCH 012/126] HDDS-13783. Add more condition to upsert Change-Id: I1d93dbc048a42cc55ff1f8ffa420e52f967527b8 --- .../snapshot/OmSnapshotLocalDataManager.java | 51 ++++++++++--------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 81d2b884ae14..6ab3d404732d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -32,7 +32,6 @@ import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -44,14 +43,11 @@ import java.util.UUID; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; -import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.SimpleStriped; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -219,7 +215,8 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) } else { UUID prevSnapId = snapshotVersionsMeta.getPreviousSnapshotId(); if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { - OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); + File 
previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(prevSnapId)); + OmSnapshotLocalData prevSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); stack.push(Pair.of(prevSnapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(prevSnapshotLocalData))); } visitedSnapshotIds.add(snapId); @@ -272,31 +269,34 @@ private void validateVersionAddition(LocalDataVersionNode versionNode) throws IO } } - private Map validateModification(OmSnapshotLocalData snapshotLocalData) + private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) throws IOException { - Map versionNodes = getVersionNodes(snapshotLocalData); - for (LocalDataVersionNode node : versionNodes.values()) { + SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { validateVersionAddition(node); } - Map snapVersionNodeMap = - getVersionNodeMap().getOrDefault(snapshotLocalData.getSnapshotId(), Collections.emptyMap()); - for (Map.Entry entry : snapVersionNodeMap.entrySet()) { - if (!versionNodes.containsKey(entry.getKey())) { - validateVersionRemoval(snapshotLocalData.getSnapshotId(), entry.getKey()); + UUID snapshotId = snapshotLocalData.getSnapshotId(); + Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? + getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); + for (Map.Entry entry : existingVersions.entrySet()) { + if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { + validateVersionRemoval(snapshotId, entry.getKey()); } } - return versionNodes; + return versionsToBeAdded; } - private void upsertNode(UUID snapshotId, Map versionNodes) throws IOException { - Map existingVersions = getVersionNodeMap().getOrDefault(snapshotId, - Collections.emptyMap()); - getVersionNodeMap().remove(snapshotId); - for (Map.Entry entry : versionNodes.entrySet()) { - addVersionNode(entry.getValue()); + private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? 
Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + if (!addSnapshotVersionMeta(snapshotId, snapshotVersions)) { + throw new IOException("Unable to upsert " + snapshotVersions + " since it already exists"); + } + + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { if (existingVersions.containsKey(entry.getKey())) { - for (LocalDataVersionNode predecessor : - localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { + for (LocalDataVersionNode predecessor : localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { localDataGraph.putEdge(predecessor, entry.getValue()); } } @@ -304,7 +304,7 @@ private void upsertNode(UUID snapshotId, Map vers for (LocalDataVersionNode localDataVersionNode : existingVersions.values()) { localDataGraph.removeNode(localDataVersionNode); } - getVersionNodeMap().put(snapshotId, versionNodes); + getVersionNodeMap().put(snapshotId, snapshotVersions); } @Override @@ -437,7 +437,8 @@ private Pair initialize(Lock snapIdLock, UUID snapId, previousReadLockAcquired.readLock().lock(); haspreviousReadLockAcquiredAcquired = true; } - Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId); + Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId) + .getSnapshotVersions(); UUID currentIteratedSnapshotId = previousSnapshotId; while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { Set previousIds = @@ -547,7 +548,7 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, } public synchronized void commit() throws IOException { - Map localDataVersionNodes = validateModification(super.snapshotLocalData); + SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); String tmpFilePath = filePath + ".tmp"; File tmpFile = new File(tmpFilePath); From 0674299a8eabb9abe0b7103dcf72b265d46669b4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:05:04 -0400 Subject: [PATCH 013/126] HDDS-13783. 
Add java doc comment Change-Id: I34202928a7a367dd0a1e57219317ff34de352b78 --- .../snapshot/OmSnapshotLocalDataManager.java | 62 +++++++++++++------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 6ab3d404732d..b002dda14186 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -44,6 +44,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; +import jnr.ffi.annotations.In; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -177,13 +178,10 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { return versionNodeMap.get(snapshotId).getVersionNode(version); } - private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) + private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) throws IOException { if (!versionNodeMap.containsKey(snapshotId)) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { - if (getVersionNode(versionNode.snapshotId, versionNode.version) != null) { - throw new IOException("Unable to add " + versionNode + " since it already exists"); - } validateVersionAddition(versionNode); LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); @@ -193,9 +191,7 @@ private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta sna } } versionNodeMap.put(snapshotId, snapshotVersionsMeta); - return true; } - return false; } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -290,21 +286,20 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); - if (!addSnapshotVersionMeta(snapshotId, snapshotVersions)) { - throw new IOException("Unable to upsert " + snapshotVersions + " since it already exists"); - } - + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. + for (Map.Entry existingVersion : existingVersions.entrySet()) { + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); + localDataGraph.removeNode(existingVersion.getValue()); + } + // Add the nodes to be added in the graph and map. + addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. 
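+ // For example (hypothetical state): if the old node for version 2 had predecessors {p1, p2}, the map above
+ // ends up holding {2 -> {p1, p2}}, and the reconnect loop below re-attaches p1 and p2 to the re-added
+ // version-2 node via putEdge.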
for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - if (existingVersions.containsKey(entry.getKey())) { - for (LocalDataVersionNode predecessor : localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + localDataGraph.putEdge(predecessor, entry.getValue()); } } - for (LocalDataVersionNode localDataVersionNode : existingVersions.values()) { - localDataGraph.removeNode(localDataVersionNode); - } - getVersionNodeMap().put(snapshotId, snapshotVersions); } @Override @@ -401,10 +396,15 @@ public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { return previousSnapshotLocalData; } + /** + * Initializes the snapshot local data by acquiring the lock on the snapshot, and also acquires a read lock on the + * snapshotId to be resolved by iterating through the chain of previous snapshot ids. + */ private Pair initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { snapIdLock.lock(); + // Get the Lock instance for the snapshot id and track it. ReadWriteLock lockIdAcquired = locks.get(snapId); ReadWriteLock previousReadLockAcquired = null; boolean haspreviousReadLockAcquiredAcquired = false; @@ -420,7 +420,9 @@ private Pair initialize(Lock snapIdLock, UUID snapId, throw new IOException("SnapshotId in path : " + loadPath + " contains snapshotLocalData corresponding " + "to snapshotId " + ssLocalData.getSnapshotId() + ". Expected snapshotId " + snapId); } - + // Get previous snapshotId and acquire read lock on the id. We need to do this outside the loop instead of a + // do-while loop since the nodes being added may not be present in the graph yet, so it may not be possible + // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); if (previousSnapshotId != null) { if (!versionNodeMap.containsKey(previousSnapshotId)) { @@ -430,6 +432,9 @@ private Pair initialize(Lock snapIdLock, UUID snapId, toResolveSnapshotId = toResolveSnapshotId == null ? ssLocalData.getPreviousSnapshotId() : toResolveSnapshotId; previousReadLockAcquired = locks.get(previousSnapshotId); + // The stripe lock could return the same lock object for multiple snapshotIds, so if a write lock was + // previously acquired on the same lock then this could cause a deadlock. If the same lock instance is + // returned then acquiring this read lock is unnecessary. if (lockIdAcquired == previousReadLockAcquired) { previousReadLockAcquired = null; } @@ -440,7 +445,10 @@ private Pair initialize(Lock snapIdLock, UUID snapId, previousReadLockAcquired.readLock().lock(); haspreviousReadLockAcquiredAcquired = true; } - Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId); + Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId) + .getSnapshotVersions(); UUID currentIteratedSnapshotId = previousSnapshotId; + // Iterate through the chain of previous snapshot ids until the snapshot id to be resolved is found. while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { + // All versions for the snapshot should point to the same previous snapshot id. Otherwise this is a sign + // of corruption. 
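+ // Schematically the walk below is: prev -> prev-of-prev -> ... until toResolveSnapshotId is reached, and
+ // each hop must expose exactly one shared previous id across the loaded versions of that snapshot.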
Set previousIds = previousVersionNodeMap.values().stream().map(LocalDataVersionNode::getPreviousSnapshotId) .collect(Collectors.toSet()); @@ -455,27 +463,42 @@ private Pair initialize(Lock snapIdLock, UUID snapId, } UUID previousId = previousIds.iterator().next(); ReadWriteLock lockToBeAcquired = locks.get(previousId); + // If the stripe lock returns the same lock object corresponding to the snapshot id then no read lock needs + // to be acquired. if (lockToBeAcquired == lockIdAcquired) { lockToBeAcquired = null; } if (lockToBeAcquired != null) { + // If a read lock has already been acquired on the same lock based on the previous iteration snapshot id + // then there is no need to acquire another read lock; this iteration can just piggyback on the + // same lock. if (lockToBeAcquired != previousReadLockAcquired) { lockToBeAcquired.readLock().lock(); haspreviousReadLockAcquiredAcquired = true; } else { + // Set the previous read lock to null since the same lock instance is going to be used for the current + // iteration's lock as well. previousReadLockAcquired = null; } } try { + // Advance each version node to its single successor in the graph, i.e. the corresponding node of the + // next previous snapshot in the chain. for (Map.Entry entry : previousVersionNodeMap.entrySet()) { Set versionNode = localDataGraph.successors(entry.getValue()); if (versionNode.size() > 1) { throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", currentIteratedSnapshotId, entry.getKey(), versionNode)); } + if (versionNode.isEmpty()) { + throw new IOException(String.format("Snapshot %s version %d doesn't have a successor", + currentIteratedSnapshotId, entry.getKey())); + } entry.setValue(versionNode.iterator().next()); } } finally { + // Release the read lock acquired on the previous snapshot id if it was acquired. Now that the instance + // is no longer needed, the read lock taken in the previous iteration can be released. if (previousReadLockAcquired != null) { previousReadLockAcquired.readLock().unlock(); } @@ -500,6 +523,7 @@ private Pair initialize(Lock snapIdLock, UUID snapId, } return Pair.of(ssLocalData, toResolveSnapshotId); } catch (IOException e) { + // Release all the locks in case of an exception and rethrow the exception. if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { previousReadLockAcquired.readLock().unlock(); } From 5d9fc4999d37f73dd9eb9bd66d63d6a571fa7d29 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:05:41 -0400 Subject: [PATCH 014/126] HDDS-13783. 
Add java doc comment Change-Id: Iad6f26cb71ec921c51ee2d138745df1a2663533f --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index b002dda14186..be892856c53e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -44,7 +44,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; -import jnr.ffi.annotations.In; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; From 2d8817603eef010a29b5fb431aa3be149ed2bd1c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:23:23 -0400 Subject: [PATCH 015/126] HDDS-13783. Implement full lock Change-Id: Ic5f7e249cfb9cb3973cbcd4abd36b22a6ff8f5aa --- .../snapshot/OmSnapshotLocalDataManager.java | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index be892856c53e..b07b6627601e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -43,6 +43,7 @@ import java.util.UUID; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; @@ -55,9 +56,11 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ratis.util.function.CheckedSupplier; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; @@ -75,6 +78,9 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final MutableGraph localDataGraph; private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; + // Used for acquiring locks on the entire data structure. + private static ReadWriteLock fullLock; + // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
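+ // Note (assumed stripe semantics): locks.get(id1) and locks.get(id2) may return the same ReadWriteLock
+ // instance for distinct snapshot ids, which is why callers compare lock identity before re-acquiring
+ // (see initialize()).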
private Striped locks; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, @@ -90,6 +96,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } }; this.versionNodeMap = new HashMap<>(); + this.fullLock = new ReentrantReadWriteLock(); init(configuration); } @@ -246,6 +253,21 @@ private void init(OzoneConfiguration configuration) throws IOException { } } + public UncheckedAutoCloseableSupplier lock() { + this.fullLock.writeLock().lock(); + return new UncheckedAutoCloseableSupplier() { + @Override + public OMLockDetails get() { + return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + } + + @Override + public void close() { + fullLock.writeLock().unlock(); + } + }; + } + private void validateVersionRemoval(UUID snapshotId, int version) throws IOException { LocalDataVersionNode versionNode = getVersionNode(snapshotId, version); if (versionNode != null && localDataGraph.inDegree(versionNode) != 0) { @@ -559,15 +581,18 @@ public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapsho private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { super(snapshotId, locks.get(snapshotId).writeLock()); + fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { super(snapshotId, locks.get(snapshotId).writeLock(), null, snapshotIdToBeResolved); + fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { super(snapshotId, locks.get(snapshotId).writeLock(), snapshotLocalDataSupplier, null); + fullLock.readLock().lock(); } public synchronized void commit() throws IOException { @@ -582,7 +607,12 @@ public synchronized void commit() throws IOException { FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); upsertNode(super.snapshotId, localDataVersionNodes); + } + @Override + public void close() { + super.close(); + fullLock.readLock().unlock(); } } From a3c4c690822f96435ff1f6135898d1c4eecab5e0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:52:13 -0400 Subject: [PATCH 016/126] HDDS-13783. Refactor and move modify method into WritableOmSnapshotLocalDataProvider Change-Id: I3a004b4b435075a4348960aeed642e8da71e7e72 --- .../snapshot/OmSnapshotLocalDataManager.java | 81 ++++++++++--------- 1 file changed, 44 insertions(+), 37 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index b07b6627601e..a870347fc2e8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -253,6 +253,13 @@ private void init(OzoneConfiguration configuration) throws IOException { } } + /** + * Acquires a write lock and provides an auto-closeable supplier for specifying details + * of the lock acquisition. The lock is released when the returned supplier is closed. + * + * @return an instance of {@code UncheckedAutoCloseableSupplier} representing + * the acquired lock details, where the lock will automatically be released on close. 
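+ * <p>Illustrative usage, a sketch only (the {@code localDataManager} reference is assumed):
+ * <pre>{@code
+ * try (UncheckedAutoCloseableSupplier<OMLockDetails> fullLockHandle = localDataManager.lock()) {
+ *   // exclusive section: new writable providers block on the full lock until this handle is closed
+ * }
+ * }</pre>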
+ */ public UncheckedAutoCloseableSupplier lock() { this.fullLock.writeLock().lock(); return new UncheckedAutoCloseableSupplier() { @@ -286,43 +293,6 @@ private void validateVersionAddition(LocalDataVersionNode versionNode) throws IO } } - private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) - throws IOException { - SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); - for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { - validateVersionAddition(node); - } - UUID snapshotId = snapshotLocalData.getSnapshotId(); - Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? - getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); - for (Map.Entry entry : existingVersions.entrySet()) { - if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { - validateVersionRemoval(snapshotId, entry.getKey()); - } - } - return versionsToBeAdded; - } - - private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { - SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); - Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : - existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); - // Track all predecessors of the existing versions and remove the node from the graph. - for (Map.Entry existingVersion : existingVersions.entrySet()) { - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); - localDataGraph.removeNode(existingVersion.getValue()); - } - // Add the nodes to be added in the graph and map. - addSnapshotVersionMeta(snapshotId, snapshotVersions); - // Reconnect all the predecessors for existing nodes. - for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } - } - } - @Override public void close() { if (snapshotLocalDataSerializer != null) { @@ -595,6 +565,43 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, fullLock.readLock().lock(); } + private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) + throws IOException { + SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { + validateVersionAddition(node); + } + UUID snapshotId = snapshotLocalData.getSnapshotId(); + Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? + getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); + for (Map.Entry entry : existingVersions.entrySet()) { + if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { + validateVersionRemoval(snapshotId, entry.getKey()); + } + } + return versionsToBeAdded; + } + + private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. 
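+ // (Capture-before-remove matters here: removeNode also drops the node's incident edges, so the predecessor
+ // sets must be read first in order to re-link them once the fresh nodes have been added.)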
+ for (Map.Entry existingVersion : existingVersions.entrySet()) { + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); + localDataGraph.removeNode(existingVersion.getValue()); + } + // Add the nodes to be added in the graph and map. + addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } + public synchronized void commit() throws IOException { SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); From 686d0c77cea774ba44fff1901efdf86789e36e59 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:58:57 -0400 Subject: [PATCH 017/126] HDDS-13783. Make full lock non static Change-Id: I06990bc9ab8fc7e1eb7bec255646a650bd8c35fe --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index a870347fc2e8..34b6ec602139 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -79,7 +79,7 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; // Used for acquiring locks on the entire data structure. - private static ReadWriteLock fullLock; + private final ReadWriteLock fullLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. private Striped locks; From 491a54b0698f34557eb982280ccdffc434d9a4d9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 10:20:14 -0400 Subject: [PATCH 018/126] HDDS-13783. Fix remove Change-Id: I4c6c61c83aa9fadab8ecef854b99dcc0a89a2208 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 34b6ec602139..17b21cf8b43a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -589,8 +589,9 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) Map> predecessors = new HashMap<>(); // Track all predecessors of the existing versions and remove the node from the graph. 
for (Map.Entry existingVersion : existingVersions.entrySet()) { - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); - localDataGraph.removeNode(existingVersion.getValue()); + LocalDataVersionNode existingVersionNode = existingVersion.getValue(); + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); From 5e69ee9a1e18e48df12d1a69b3694544dccb0b94 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 17:35:35 -0400 Subject: [PATCH 019/126] HDDS-13627. Fix findbugs Change-Id: I0e476322372a302572f1fe79cbf2e874bfeac2ed --- .../om/snapshot/TestOmSnapshotLocalDataManager.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index d0ac0ad19bcb..34bde4814a6e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -25,6 +25,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -86,7 +87,6 @@ public class TestOmSnapshotLocalDataManager { private AutoCloseable mocks; private File snapshotsDir; - private File dbLocation; @BeforeAll public static void setupClass() { @@ -115,10 +115,11 @@ public void setUp() throws IOException { this.snapshotsDir = tempDir.resolve("snapshots").toFile(); FileUtils.deleteDirectory(snapshotsDir); - snapshotsDir.mkdirs(); - dbLocation = tempDir.resolve("db").toFile(); + assertTrue(snapshotsDir.exists() || snapshotsDir.mkdirs()); + File dbLocation = tempDir.resolve("db").toFile(); FileUtils.deleteDirectory(dbLocation); - dbLocation.mkdirs(); + assertTrue(dbLocation.exists() || dbLocation.mkdirs()); + when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); @@ -161,7 +162,8 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { // Setup snapshot store mock File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); - snapshotDbLocation.mkdirs(); + assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); + List sstFiles = new ArrayList<>(); sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); From d36622a5ad8b5eeb0431650f483a7e2481623275 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 22:34:22 -0400 Subject: [PATCH 020/126] HDDS-13785. 
Remove orphan versions from SnapshotLocalData Yaml file Change-Id: I31004e0c95dad64411c6fe848501a82f2f773cba --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 5 + .../hadoop/ozone/om/OmSnapshotLocalData.java | 4 + .../hadoop/ozone/om/OmSnapshotManager.java | 6 +- .../snapshot/OmSnapshotLocalDataManager.java | 123 +++++++++++++++--- .../ozone/om/snapshot/SnapshotUtils.java | 11 ++ .../TestOmSnapshotLocalDataManager.java | 43 +++--- 6 files changed, 155 insertions(+), 37 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 1d47fb72958f..41eeb10e5c2c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -631,6 +631,11 @@ public final class OzoneConfigKeys { OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT = TimeUnit.DAYS.toMillis(30); + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL = + "ozone.om.snapshot.local.data.manager.service.interval"; + + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m"; + public static final String OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL = "ozone.om.snapshot.compaction.dag.prune.daemon.run.interval"; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5af678f903fb..7c29c8bc148e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -190,6 +190,10 @@ public void addVersionSSTFileInfos(List sstFiles, int previousSnaps this.versionSstFileInfos.put(version, new VersionMeta(previousSnapshotVersion, sstFiles)); } + public void removeVersionSSTFileInfos(int snapshotVersion) { + this.versionSstFileInfos.remove(snapshotVersion); + } + /** * Returns the checksum of the YAML representation. * @return checksum diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 743c1e584e25..ad3a820c2c95 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -196,10 +196,10 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), - ozoneManager.getConfiguration()); - boolean isFilesystemSnapshotEnabled = - ozoneManager.isFilesystemSnapshotEnabled(); + omMetadataManager.getSnapshotChainManager(), ozoneManager.getConfiguration()); + boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", isFilesystemSnapshotEnabled ? 
"enabled" : "disabled"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 17b21cf8b43a..deb44e02034f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -21,6 +21,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; @@ -41,6 +43,8 @@ import java.util.Set; import java.util.Stack; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -48,6 +52,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -55,6 +60,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.util.ObjectSerializer; @@ -73,6 +79,7 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock"; + private static final String LOCAL_DATA_MANAGER_SERVICE_NAME = "OmSnapshotLocalDataManagerService"; private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; @@ -82,8 +89,12 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final ReadWriteLock fullLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
private Striped locks; + private Map snapshotToBeCheckedForOrphans; + private Scheduler scheduler; + private volatile boolean closed; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, + SnapshotChainManager snapshotChainManager, OzoneConfiguration configuration) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; @@ -97,7 +108,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO }; this.versionNodeMap = new HashMap<>(); this.fullLock = new ReentrantReadWriteLock(); - init(configuration); + init(configuration, snapshotChainManager); } @VisibleForTesting @@ -186,7 +197,7 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) throws IOException { - if (!versionNodeMap.containsKey(snapshotId)) { + if (!versionNodeMap.containsKey(snapshotId) && !snapshotVersionsMeta.getSnapshotVersions().isEmpty()) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { validateVersionAddition(versionNode); LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : @@ -226,10 +237,28 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) } } - private void init(OzoneConfiguration configuration) throws IOException { + private void increamentOrphanCheckCount(UUID snapshotId) { + this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 1 : v + 1); + } + + private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { + this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> { + if (v == null) { + return null; + } + int newValue = v - decrementBy; + if (newValue <= 0) { + return null; + } + return newValue; + }); + } + + private void init(OzoneConfiguration configuration, SnapshotChainManager snapshotChainManager) throws IOException { boolean fair = configuration.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); String stripeSizeKey = OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX + SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME; int size = configuration.getInt(stripeSizeKey, OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT); + this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>(); this.locks = SimpleStriped.readWriteLock(size, fair); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); @@ -251,6 +280,48 @@ private void init(OzoneConfiguration configuration) throws IOException { } addVersionNodeWithDependents(snapshotLocalData); } + for (UUID snapshotId : versionNodeMap.keySet()) { + increamentOrphanCheckCount(snapshotId); + } + this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); + long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( + OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + this.scheduler.scheduleWithFixedDelay( + () -> { + try { + checkOrphanSnapshotVersions(omMetadataManager, snapshotChainManager); + } catch (IOException e) { + LOG.error("Exception while checking orphan snapshot versions", e); + } + }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS); + } + + private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager) + throws 
IOException { + for (Map.Entry entry : snapshotToBeCheckedForOrphans.entrySet()) { + UUID snapshotId = entry.getKey(); + int countBeforeCheck = entry.getValue(); + try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = + new WritableOmSnapshotLocalDataProvider(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); + boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId); + for (Map.Entry integerLocalDataVersionNodeEntry : getVersionNodeMap().get( + snapshotId).getSnapshotVersions().entrySet()) { + LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue(); + // remove the version entry if it is not referenced by any other snapshot version node. For version node 0 + // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0 + // node can be only deleted if the snapshot is also purged. + boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 + && (versionEntry.getVersion() != 0 || isSnapshotPurged); + if (toRemove) { + snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion()); + } + } + snapshotLocalDataProvider.commit(); + } + decreamentOrphanCheckCount(snapshotId, countBeforeCheck); + } } /** @@ -373,16 +444,12 @@ public OmSnapshotLocalData getSnapshotLocalData() { return snapshotLocalData; } - public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { + public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { if (!isPreviousSnapshotLoaded) { - synchronized (this) { - if (!isPreviousSnapshotLoaded) { - File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); - this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : - snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); - this.isPreviousSnapshotLoaded = true; - } - } + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : + snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + this.isPreviousSnapshotLoaded = true; } return previousSnapshotLocalData; } @@ -593,6 +660,7 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); localDataGraph.removeNode(existingVersionNode); } + // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); // Reconnect all the predecessors for existing nodes. @@ -601,19 +669,36 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) localDataGraph.putEdge(predecessor, entry.getValue()); } } + // The previous snapshotId could have become an orphan entry or could have orphan versions. 
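The orphan bookkeeping above leans on ConcurrentHashMap.compute(), whose remapping function runs atomically per key and removes the entry when it returns null. The decrement takes the count observed before the scan because new check requests can race in while a scan is running; only the consumed amount is subtracted, so a concurrent increment survives. A condensed sketch of the same counter (hypothetical class, same semantics):

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public final class OrphanCheckCounter {
      private final ConcurrentMap<UUID, Integer> pending = new ConcurrentHashMap<>();

      // Atomically bump the number of pending orphan checks for a snapshot.
      void increment(UUID snapshotId) {
        pending.compute(snapshotId, (k, v) -> v == null ? 1 : v + 1);
      }

      // Subtract only the checks that were actually performed; returning
      // null from the remapping function deletes the entry at zero.
      void decrement(UUID snapshotId, int by) {
        pending.compute(snapshotId, (k, v) ->
            (v == null || v - by <= 0) ? null : v - by);
      }
    }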
+ if (existingSnapVersions != null) { + increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); + } } public synchronized void commit() throws IOException { SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); - String tmpFilePath = filePath + ".tmp"; - File tmpFile = new File(tmpFilePath); - if (tmpFile.exists()) { - tmpFile.delete(); + File snapshotLocalDataFile = new File(filePath); + if (!localDataVersionNodes.getSnapshotVersions().isEmpty()) { + String tmpFilePath = filePath + ".tmp"; + File tmpFile = new File(tmpFilePath); + boolean tmpFileExists = tmpFile.exists(); + if (tmpFileExists) { + tmpFileExists = !tmpFile.delete(); + } + if (!tmpFileExists) { + throw new IOException("Unable to delete tmp file " + tmpFilePath); + } + snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); + FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + } else if (snapshotLocalDataFile.exists()) { + LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}", + super.snapshotId, snapshotLocalDataFile.getAbsolutePath()); + if (snapshotLocalDataFile.delete()) { + throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); + } } - snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); - FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, - StandardCopyOption.REPLACE_EXISTING); upsertNode(super.snapshotId, localDataVersionNodes); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index ea6d88c8e194..f1ef5035e7c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -34,6 +34,8 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.CodecException; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -89,6 +91,15 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } + public static boolean isSnapshotPurged(SnapshotChainManager chainManager, OMMetadataManager omMetadataManager, + UUID snapshotId) throws RocksDatabaseException, CodecException { + String tableKey = chainManager.getTableKey(snapshotId); + if (tableKey == null) { + return true; + } + return !omMetadataManager.getSnapshotInfoTable().isExist(tableKey); + } + public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, SnapshotChainManager chainManager, UUID snapshotId) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 5be849ab7641..4d676fc90003 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -25,7 +25,10 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; import java.io.File; @@ -50,6 +53,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.util.YamlSerializer; @@ -61,6 +65,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -77,6 +82,9 @@ public class TestOmSnapshotLocalDataManager { @Mock private OMMetadataManager omMetadataManager; + @Mock + private SnapshotChainManager chainManager; + @Mock private RDBStore rdbStore; @@ -91,6 +99,7 @@ public class TestOmSnapshotLocalDataManager { private File snapshotsDir; private File dbLocation; + private MockedStatic snapshotUtilMock; @BeforeAll public static void setupClass() { @@ -117,16 +126,17 @@ public void setUp() throws IOException { // Setup mock behavior when(omMetadataManager.getStore()).thenReturn(rdbStore); - this.snapshotsDir = tempDir.resolve("snapshots").toFile(); FileUtils.deleteDirectory(snapshotsDir); - snapshotsDir.mkdirs(); + assertTrue(snapshotsDir.exists() || snapshotsDir.mkdirs()); dbLocation = tempDir.resolve("db").toFile(); FileUtils.deleteDirectory(dbLocation); - dbLocation.mkdirs(); + assertTrue(dbLocation.exists() || dbLocation.mkdirs()); when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); + this.snapshotUtilMock = mockStatic(SnapshotUtils.class); + snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())).thenReturn(false); } @AfterEach @@ -137,11 +147,14 @@ public void tearDown() throws Exception { if (mocks != null) { mocks.close(); } + if (snapshotUtilMock != null) { + snapshotUtilMock.close(); + } } @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); assertNotNull(localDataManager); } @@ -150,7 +163,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -166,7 +179,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { // Setup snapshot store mock 
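The setUp/tearDown changes above pin SnapshotUtils.isSnapshotPurged to a fixed answer through Mockito's MockedStatic, which patches a class's static methods for the current thread until close() is called; keeping the handle in a field and closing it in tearDown (as done here) or using try-with-resources are equally valid. A self-contained sketch with a hypothetical utility class, assuming the inline mock maker that recent Mockito versions enable by default:

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.mockStatic;

    import org.mockito.MockedStatic;

    public final class MockedStaticSketch {
      static final class SomeUtils {
        static String lookup(Object key) {
          return "real";
        }
      }

      public static void main(String[] args) {
        try (MockedStatic<SomeUtils> mocked = mockStatic(SomeUtils.class)) {
          mocked.when(() -> SomeUtils.lookup(any())).thenReturn("stubbed");
          System.out.println(SomeUtils.lookup("x"));  // stubbed
        }
        // Outside the scope the real implementation is restored.
        System.out.println(SomeUtils.lookup("x"));    // real
      }
    }

Forgetting to close a MockedStatic leaks the stubbing into later tests on the same thread, which is why the tearDown hook above null-checks and closes it.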
File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); - snapshotDbLocation.mkdirs(); + assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); List sstFiles = new ArrayList<>(); sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); @@ -180,7 +193,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); @@ -203,7 +216,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); @@ -224,7 +237,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -240,7 +253,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -258,7 +271,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); // Create snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -274,7 +287,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -296,7 +309,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load 
existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); assertNotNull(localDataManager); Map versionMap = @@ -316,13 +329,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException { // Should throw IOException during init assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager, conf); + new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); }); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); // Should not throw exception localDataManager.close(); From ee213d15e5d9f9ef1a59345f626a2d355e2ae126 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 22:39:12 -0400 Subject: [PATCH 021/126] HDDS-13785. Fix findbugs Change-Id: Id317c8b56e8b25c122b68eaf96599b9690d08f79 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index deb44e02034f..7df7fd09a2ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -365,12 +365,17 @@ private void validateVersionAddition(LocalDataVersionNode versionNode) throws IO } @Override - public void close() { - if (snapshotLocalDataSerializer != null) { - try { - snapshotLocalDataSerializer.close(); - } catch (IOException e) { - LOG.error("Failed to close snapshot local data serializer", e); + public synchronized void close() { + if (!closed) { + if (snapshotLocalDataSerializer != null) { + try { + snapshotLocalDataSerializer.close(); + } catch (IOException e) { + LOG.error("Failed to close snapshot local data serializer", e); + } + } + if (scheduler != null) { + scheduler.close(); } } } From a95604ed3e21e4ed5a2f884f1166f4641362c15b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 23:25:39 -0400 Subject: [PATCH 022/126] HDDS-13627. 
Fix tests Change-Id: Ie5e5f3dab4324103e8855dd15619d7755f0422e6 --- .../om/response/snapshot/OMSnapshotPurgeResponse.java | 9 ++------- .../filter/AbstractReclaimableFilterTest.java | 11 ++++++++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 75ba2a8f9501..267547bc1e54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -23,9 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import jakarta.annotation.Nonnull; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Map; import org.apache.commons.io.FileUtils; @@ -130,14 +128,11 @@ private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalD boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); - // TODO: Do not delete on snapshot purge. OmSnapshotLocalDataManager should delete orphan local data files. - Path snapshotLocalDataPath = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); try { FileUtils.deleteDirectory(snapshotDirPath.toFile()); - Files.deleteIfExists(snapshotLocalDataPath); } catch (IOException ex) { - LOG.error("Failed to delete snapshot directory {} and/or local data file {} for snapshot {}", - snapshotDirPath, snapshotLocalDataPath, snapshotInfo.getTableKey(), ex); + LOG.error("Failed to delete snapshot directory {} for snapshot {}", + snapshotDirPath, snapshotInfo.getTableKey(), ex); } finally { omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java index e8c362d9a5f4..13ba79a77f82 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java @@ -27,6 +27,7 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; @@ -61,6 +62,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; @@ -188,9 +190,9 @@ private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { private void mockOmSnapshotManager(OzoneManager om) 
throws RocksDBException, IOException { try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); MockedConstruction mockedSnapshotDiffManager = - Mockito.mockConstruction(SnapshotDiffManager.class, (mock, context) -> + mockConstruction(SnapshotDiffManager.class, (mock, context) -> doNothing().when(mock).close()); - MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, + MockedConstruction mockedCache = mockConstruction(SnapshotCache.class, (mock, context) -> { Map> map = new HashMap<>(); when(mock.get(any(UUID.class))).thenAnswer(i -> { @@ -237,7 +239,10 @@ private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOE conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); when(om.getConfiguration()).thenReturn(conf); when(om.isFilesystemSnapshotEnabled()).thenReturn(true); - this.omSnapshotManager = new OmSnapshotManager(om); + try (MockedConstruction ignored = + mockConstruction(OmSnapshotLocalDataManager.class)) { + this.omSnapshotManager = new OmSnapshotManager(om); + } } } From 5a90fcfa158543e2f444d2ac888d9b03ace99301 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 23:29:51 -0400 Subject: [PATCH 023/126] HDDS-13627. remove checksum interface Change-Id: I55bd5c3ef7fc32910a9111328638de2edffcd541 --- .../apache/hadoop/ozone/util/Checksum.java | 28 ------------------- 1 file changed, 28 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java deleted file mode 100644 index 4d11bde5aef3..000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.util; - -import org.apache.hadoop.hdds.utils.db.CopyObject; - -/** - * Represents a generic interface for objects capable of generating or providing - * a checksum value. - */ -public interface Checksum> extends CopyObject { - String getChecksum(); -} From 20d7d6add8a0f16362b3d513a348b4f9afeff809 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 06:35:33 -0400 Subject: [PATCH 024/126] HDDS-13627. 
Fix test failures Change-Id: I880997d3eebdf378f14c203c61c2d63b2d17552e --- .../request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 0fb26a4cd993..35053882eeda 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -190,8 +190,6 @@ public void testValidateAndUpdateCache() throws Exception { // Check if all the checkpoints are cleared. for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); - assertFalse(Files.exists(Paths.get( - OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); } assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges()); assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails()); From ae655cbfd8c10321d05ed74cd248b1e2fb22818f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 10:30:07 -0400 Subject: [PATCH 025/126] HDDS-13785. Set defrag flag on previous snapshotId update Change-Id: I13ba8e2fd012a3c964d657e83496c93a4f55a3be --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 7df7fd09a2ae..2054e3b951b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -651,6 +651,11 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo validateVersionRemoval(snapshotId, entry.getKey()); } } + SnapshotVersionsMeta existingVersionMeta = getVersionNodeMap().get(snapshotId); + // Set the needsDefrag if the new previous snapshotId is different from the existing one or if this is a new + // snapshot yaml file. + snapshotLocalData.setNeedsDefrag(existingVersionMeta == null + || !Objects.equals(existingVersionMeta.getPreviousSnapshotId(), snapshotLocalData.getPreviousSnapshotId())); return versionsToBeAdded; } From d419283eced9b5aff28a924b1801cda63321e0ab Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 14:44:56 -0400 Subject: [PATCH 026/126] HDDS-13783. 
Fix findbugs Change-Id: I02de81771c9102f1212bf1962e65095910ab8207 --- .../snapshot/OmSnapshotLocalDataManager.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 17b21cf8b43a..58a5923bc7bd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -373,16 +373,12 @@ public OmSnapshotLocalData getSnapshotLocalData() { return snapshotLocalData; } - public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { + public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { if (!isPreviousSnapshotLoaded) { - synchronized (this) { - if (!isPreviousSnapshotLoaded) { - File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); - this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : - snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); - this.isPreviousSnapshotLoaded = true; - } - } + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : + snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + this.isPreviousSnapshotLoaded = true; } return previousSnapshotLocalData; } @@ -608,8 +604,12 @@ public synchronized void commit() throws IOException { String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); String tmpFilePath = filePath + ".tmp"; File tmpFile = new File(tmpFilePath); - if (tmpFile.exists()) { - tmpFile.delete(); + boolean tmpFileExists = tmpFile.exists(); + if (tmpFileExists) { + tmpFileExists = !tmpFile.delete(); + } + if (!tmpFileExists) { + throw new IOException("Unable to delete tmp file " + tmpFilePath); } snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, From 8a443087a096fea473f55ab65680943c72b32011 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 15:39:52 -0400 Subject: [PATCH 027/126] HDDS-13783. 
Fix pmd Change-Id: I8360183ef8ac68a95a05a6a2b00bb7ede5d57d12 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 58a5923bc7bd..393b988bd41e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -637,22 +637,10 @@ private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshot this.version = version; } - private int getVersion() { - return version; - } - - private UUID getSnapshotId() { - return snapshotId; - } - private UUID getPreviousSnapshotId() { return previousSnapshotId; } - private int getPreviousSnapshotVersion() { - return previousSnapshotVersion; - } - @Override public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { From 4d272d190f8aa8c2a4426389caa046e50c7140a6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 18:23:45 -0400 Subject: [PATCH 028/126] HDDS-13783. Fix lock release Change-Id: Ia7b386dab7558275be659bc32f838ccdd7f46ef5 --- .../snapshot/OmSnapshotLocalDataManager.java | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 393b988bd41e..2f417dd5f168 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -47,6 +47,7 @@ import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -340,6 +341,7 @@ public class ReadableOmSnapshotLocalDataProvider implements AutoCloseable { private final UUID snapshotId; private final Lock lock; private final OmSnapshotLocalData snapshotLocalData; + private final Lock previousLock; private OmSnapshotLocalData previousSnapshotLocalData; private volatile boolean isPreviousSnapshotLoaded = false; private final UUID resolvedPreviousSnapshotId; @@ -361,10 +363,11 @@ protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock, UUID snapshotIdToBeResolved) throws IOException { this.snapshotId = snapshotId; this.lock = lock; - Pair pair = initialize(lock, snapshotId, snapshotIdToBeResolved, + Triple pair = initialize(lock, snapshotId, snapshotIdToBeResolved, snapshotLocalDataSupplier); - this.snapshotLocalData = pair.getKey(); - this.resolvedPreviousSnapshotId = pair.getValue(); + this.snapshotLocalData = pair.getLeft(); + this.previousLock = pair.getMiddle(); + this.resolvedPreviousSnapshotId = pair.getRight(); this.previousSnapshotLocalData = null; this.isPreviousSnapshotLoaded = false; } @@ -387,7 +390,7 @@ public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IO * Intializer the 
snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ - private Pair initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, + private Triple initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { snapIdLock.lock(); @@ -408,7 +411,7 @@ private Pair initialize(Lock snapIdLock, UUID snapId, "to snapshotId " + ssLocalData.getSnapshotId() + ". Expected snapshotId " + snapId); } // Get previous snapshotId and acquire read lock on the id. We need to do this outside the loop instead of a - // do while loop since the nodes that may be added may not be present in the graph so it may not be possible + // do while loop since the nodes that need be added may not be present in the graph so it may not be possible // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); if (previousSnapshotId != null) { @@ -508,7 +511,9 @@ private Pair initialize(Lock snapIdLock, UUID snapId, } else { toResolveSnapshotId = null; } - return Pair.of(ssLocalData, toResolveSnapshotId); + return Triple.of(ssLocalData, + previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null , + toResolveSnapshotId); } catch (IOException e) { // Release all the locks in case of an exception and rethrow the exception. if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { @@ -521,8 +526,8 @@ private Pair initialize(Lock snapIdLock, UUID snapId, @Override public void close() { - if (resolvedPreviousSnapshotId != null) { - locks.get(resolvedPreviousSnapshotId).readLock().unlock(); + if (previousLock != null) { + previousLock.unlock(); } lock.unlock(); } @@ -608,7 +613,7 @@ public synchronized void commit() throws IOException { if (tmpFileExists) { tmpFileExists = !tmpFile.delete(); } - if (!tmpFileExists) { + if (tmpFileExists) { throw new IOException("Unable to delete tmp file " + tmpFilePath); } snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); From 2a38f598dfa0a3090e8c237821af6a5e7bb4b5b1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 18:54:33 -0400 Subject: [PATCH 029/126] HDDS-13627. 
address review comments Change-Id: Id19a1c451f1cdd6b08879e39b4ac2bae5d4517dc --- .../om/snapshot/OmSnapshotLocalDataManager.java | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 75611955b6f8..46f111f0f320 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -147,7 +147,7 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { return versionNodeMap.get(snapshotId).getVersionNode(version); } - private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) + private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) throws IOException { if (!versionNodeMap.containsKey(snapshotId)) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { @@ -166,9 +166,7 @@ private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta sna } } versionNodeMap.put(snapshotId, snapshotVersionsMeta); - return true; } - return false; } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -231,10 +229,10 @@ public void close() { } static final class LocalDataVersionNode { - private UUID snapshotId; - private int version; - private UUID previousSnapshotId; - private int previousSnapshotVersion; + private final UUID snapshotId; + private final int version; + private final UUID previousSnapshotId; + private final int previousSnapshotVersion; private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { this.previousSnapshotId = previousSnapshotId; @@ -248,7 +246,6 @@ public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { return false; } - LocalDataVersionNode that = (LocalDataVersionNode) o; return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); From ca098cf1ebd32827d204e41367193b9ddb02e167 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 21:25:24 -0400 Subject: [PATCH 030/126] HDDS-13783. 
Make graph updates synchronized Change-Id: I1b9d4227870d3918fdbf293e07f1e6a87bdcfd6c --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 2f417dd5f168..a543a65c501f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -201,7 +201,7 @@ private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh } } - public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { + void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { if (versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { return; } @@ -583,7 +583,7 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo return versionsToBeAdded; } - private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); From 67d4b3d95c395fe43b23f6905e13e427194f2c0b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 21:35:12 -0400 Subject: [PATCH 031/126] HDDS-13627. Make add version with dependents package private Change-Id: I9f09fb50aafe82718bce9884c46d26b4862b5c04 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 46f111f0f320..3c529abaf3c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -169,7 +169,7 @@ private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh } } - public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { + void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { if (versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { return; } From 665f4116df6f026d53d0f31da42c43b961e71a07 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 23:29:29 -0400 Subject: [PATCH 032/126] HDDS-13783. 
Fix checkstyle Change-Id: I9c2cab7442f87c64b8b342910b1949ea133c72ad --- .../om/snapshot/OmSnapshotLocalDataManager.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 311590d660de..a543a65c501f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -27,9 +27,6 @@ import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import com.google.common.util.concurrent.Striped; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.graph.GraphBuilder; -import com.google.common.graph.MutableGraph; import java.io.File; import java.io.IOException; import java.nio.file.Path; @@ -53,17 +50,6 @@ import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.SimpleStriped; -import java.nio.file.Paths; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.Stack; -import java.util.UUID; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; From 2894e404db72b82abf5b0680f201f5ee7f5bf00b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 23:34:22 -0400 Subject: [PATCH 033/126] HDDS-13783. 
Fix merge conflict Change-Id: Idfd6f31fdea8c0166e02ae2a95984ad520b4c0d1 --- .../org/apache/hadoop/ozone/om/SnapshotDefragService.java | 8 ++++---- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 436593b861b6..904ade6abe77 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -130,11 +131,10 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { String snapshotPath = OmSnapshotManager.getSnapshotPath( ozoneManager.getConfiguration(), snapshotInfo); - try { + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = + ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { // Read snapshot local metadata from YAML - OmSnapshotLocalData snapshotLocalData = ozoneManager.getOmSnapshotManager() - .getSnapshotLocalDataManager() - .getOmSnapshotLocalData(snapshotInfo); + OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); // Check if snapshot needs compaction (defragmentation) boolean needsDefrag = snapshotLocalData.getNeedsDefrag(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index a543a65c501f..54adca46ddaf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -511,8 +511,7 @@ private Triple initialize(Lock snapIdLock, UUID } else { toResolveSnapshotId = null; } - return Triple.of(ssLocalData, - previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null , + return Triple.of(ssLocalData, previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null, toResolveSnapshotId); } catch (IOException e) { // Release all the locks in case of an exception and rethrow the exception. From ea0ab16c737843b99adde981d5c015d8d2535f7e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 06:26:19 -0400 Subject: [PATCH 034/126] HDDS-13783. 
Add write version api Change-Id: If7d52317a65df2d941cc9dc6befd8215e7418f60 --- .../hadoop/ozone/om/OmSnapshotLocalData.java | 5 +- .../snapshot/OmSnapshotLocalDataManager.java | 7 ++ .../ozone/om/TestOmSnapshotLocalDataYaml.java | 8 +- .../TestOmSnapshotLocalDataManager.java | 99 +++++++++++++++++++ 4 files changed, 113 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 83ad02fb14bc..d78430b6cae6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -184,9 +184,10 @@ public void setPreviousSnapshotId(UUID previousSnapshotId) { * Adds an entry to the defragged SST file list. * @param sstFiles SST file name */ - public void addVersionSSTFileInfos(List<SstFileInfo> sstFiles, int previousSnapshotVersion) { + public void addVersionSSTFileInfos(List<LiveFileMetaData> sstFiles, int previousSnapshotVersion) { version++; - this.versionSstFileInfos.put(version, new VersionMeta(previousSnapshotVersion, sstFiles)); + this.versionSstFileInfos.put(version, new VersionMeta(previousSnapshotVersion, sstFiles.stream() + .map(SstFileInfo::new).collect(Collectors.toList()))); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 54adca46ddaf..0f658348bfbe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -36,6 +36,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -62,6 +63,7 @@ import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ratis.util.function.CheckedSupplier; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.rocksdb.LiveFileMetaData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; @@ -603,6 +605,11 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps } } + public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { + List<LiveFileMetaData> sstFiles = OmSnapshotManager.getSnapshotSSTFileList(snapshotStore); + this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, getPreviousSnapshotLocalData().getVersion()); + } + public synchronized void commit() throws IOException { SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 23d332ae75b9..b234014ebbc0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -130,11 +130,11 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName)
throw // Add some defragged SST files dataYaml.addVersionSSTFileInfos(ImmutableList.of( - new SstFileInfo("defragged-sst1", "k1", "k2", "table1"), - new SstFileInfo("defragged-sst2", "k3", "k4", "table2")), + createLiveFileMetaData("defragged-sst1", "table1", "k1", "k2"), + createLiveFileMetaData("defragged-sst2", "table2", "k3", "k4")), 1); dataYaml.addVersionSSTFileInfos(Collections.singletonList( - new SstFileInfo("defragged-sst3", "k4", "k5", "table1")), 3); + createLiveFileMetaData("defragged-sst3", "table1", "k4", "k5")), 3); File yamlFile = new File(testRoot, yamlFilePath); @@ -202,7 +202,7 @@ public void testUpdateSnapshotDataFile() throws IOException { dataYaml.setSstFiltered(false); dataYaml.setNeedsDefrag(false); dataYaml.addVersionSSTFileInfos( - singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); + singletonList(createLiveFileMetaData("defragged-sst4", "table3", "k5", "k6")), 5); // Write updated data back to file omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index f77f4fa5c581..e63a557ca83c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -26,9 +26,15 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; +import com.google.common.util.concurrent.Striped; import java.io.File; import java.io.IOException; import java.nio.file.Path; @@ -39,12 +45,15 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -61,7 +70,9 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -92,6 +103,11 @@ public class TestOmSnapshotLocalDataManager { private File snapshotsDir; + private static final String READ_LOCK_MESSAGE_ACQUIRE = "readLock acquire"; + private static final String READ_LOCK_MESSAGE_UNLOCK = "readLock unlock"; + private static final String WRITE_LOCK_MESSAGE_ACQUIRE = "writeLock acquire"; + private static final String 
WRITE_LOCK_MESSAGE_UNLOCK = "writeLock unlock"; + @BeforeAll public static void setupClass() { conf = new OzoneConfiguration(); @@ -140,6 +156,89 @@ public void tearDown() throws Exception { } } + private String getReadLockMessageAcquire(int index) { + return READ_LOCK_MESSAGE_ACQUIRE + index; + } + + private String getReadLockMessageUnlock(int index) { + return READ_LOCK_MESSAGE_UNLOCK + index; + } + + private String getWriteLockMessageAcquire(int index) { + return WRITE_LOCK_MESSAGE_ACQUIRE + index; + } + + private String getWriteLockMessageUnlock(int index) { + return WRITE_LOCK_MESSAGE_UNLOCK + index; + } + + private MockedStatic<SimpleStriped> mockStripedLock(Map<Object, Integer> lockMap, int numLocks, + List<String> messageCaptorer) { + MockedStatic<SimpleStriped> mockedStatic = mockStatic(SimpleStriped.class); + Striped<ReadWriteLock> stripedLock = mock(Striped.class); + + List<ReadWriteLock> readWriteLocks = new ArrayList<>(); + for (int idx = 0; idx < numLocks; idx++) { + final int lockIndex = idx; + ReadWriteLock readWriteLock = mock(ReadWriteLock.class); + Lock readLock = mock(Lock.class); + Lock writeLock = mock(Lock.class); + when(readWriteLock.readLock()).thenReturn(readLock); + when(readWriteLock.writeLock()).thenReturn(writeLock); + doAnswer(invocationOnMock -> { + messageCaptorer.add(getReadLockMessageAcquire(lockIndex)); + return null; + }).when(readLock).lock(); + doAnswer(invocationOnMock -> { + messageCaptorer.add(getReadLockMessageUnlock(lockIndex)); + return null; + }).when(readLock).unlock(); + + doAnswer(invocationOnMock -> { + messageCaptorer.add(getWriteLockMessageAcquire(lockIndex)); + return null; + }).when(writeLock).lock(); + doAnswer(invocationOnMock -> { + messageCaptorer.add(getWriteLockMessageUnlock(lockIndex)); + return null; + }).when(writeLock).unlock(); + // Register each mocked lock so stripedLock.get(...) below can resolve it by index. + readWriteLocks.add(readWriteLock); + } + when(stripedLock.get(any())).thenAnswer(i -> { + if (lockMap.containsKey(i.getArgument(0))) { + return readWriteLocks.get(lockMap.get(i.getArgument(0))); + } + return readWriteLocks.get(0); + }); + mockedStatic.when(() -> SimpleStriped.readWriteLock(anyInt(), anyBoolean())).thenReturn(stripedLock); + return mockedStatic; + } + + private List<SnapshotInfo> createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, + int numberOfSnapshots) { + List<SnapshotInfo> snapshotInfos = new ArrayList<>(); + SnapshotInfo previousSnapshotInfo = null; + + for (int i = 0; i < numberOfSnapshots; i++) { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, previousSnapshotInfo == null ? null + : previousSnapshotInfo.getSnapshotId()); + OmSnapshotLocalData localData = createMockLocalData(snapshotId, snapshotInfo.getPathPreviousSnapshotId()); + + snapshotInfos.add(snapshotInfo); + previousSnapshotInfo = snapshotInfo; + } + return snapshotInfos; + } + + /** + * Reading Snap1 against Snap5. + */ + @Test + public void testLockOrderingWithOverLappingLocks() { + } + @Test public void testConstructor() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); From 915562bc3102aaad85308ff057737af74fdd3fa4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 09:45:14 -0400 Subject: [PATCH 035/126] HDDS-13797.
Refactor OzoneManagerLock Resource class to handle hierarchical resource locking Change-Id: I433a52feb491b72ea303fa32025540a742555d08 --- .../ozone/om/S3SecretLockedManager.java | 2 +- .../hadoop/ozone/om/lock/FlatResource.java | 46 ++++++ .../lock/HierachicalResourceLockManager.java | 38 +++++ .../ozone/om/lock/IOzoneManagerLock.java | 13 -- .../hadoop/ozone/om/lock/LeveledResource.java | 132 ++++++++++++++++ .../ozone/om/lock/OzoneManagerLock.java | 141 ------------------ .../apache/hadoop/ozone/om/lock/Resource.java | 31 ++++ .../hadoop/ozone/om/lock/TestKeyPathLock.java | 20 +-- .../ozone/om/lock/TestOzoneManagerLock.java | 3 - ...napshotDeletingServiceIntegrationTest.java | 2 +- .../hadoop/ozone/om/BucketManagerImpl.java | 2 +- .../hadoop/ozone/om/KeyManagerImpl.java | 2 +- .../apache/hadoop/ozone/om/ListIterator.java | 2 +- .../OMDBCheckpointServletInodeBasedXfer.java | 2 +- .../apache/hadoop/ozone/om/OzoneManager.java | 4 +- .../hadoop/ozone/om/PrefixManagerImpl.java | 2 +- .../ozone/om/SnapshotDefragService.java | 2 +- .../hadoop/ozone/om/SstFilteringService.java | 2 +- .../hadoop/ozone/om/VolumeManagerImpl.java | 4 +- .../ozone/om/lock/OBSKeyPathLockStrategy.java | 4 +- .../om/lock/RegularBucketLockStrategy.java | 2 +- .../request/bucket/OMBucketCreateRequest.java | 4 +- .../request/bucket/OMBucketDeleteRequest.java | 4 +- .../bucket/OMBucketSetOwnerRequest.java | 2 +- .../bucket/OMBucketSetPropertyRequest.java | 2 +- .../bucket/acl/OMBucketAclRequest.java | 2 +- .../file/OMDirectoryCreateRequest.java | 2 +- .../file/OMDirectoryCreateRequestWithFSO.java | 2 +- .../om/request/file/OMFileCreateRequest.java | 2 +- .../file/OMFileCreateRequestWithFSO.java | 2 +- .../request/file/OMRecoverLeaseRequest.java | 2 +- .../request/key/OMAllocateBlockRequest.java | 2 +- .../key/OMAllocateBlockRequestWithFSO.java | 2 +- .../key/OMDirectoriesPurgeRequestWithFSO.java | 2 +- .../om/request/key/OMKeyCommitRequest.java | 2 +- .../key/OMKeyCommitRequestWithFSO.java | 2 +- .../key/OMKeyCreateRequestWithFSO.java | 2 +- .../om/request/key/OMKeyDeleteRequest.java | 2 +- .../key/OMKeyDeleteRequestWithFSO.java | 2 +- .../om/request/key/OMKeyPurgeRequest.java | 2 +- .../om/request/key/OMKeyRenameRequest.java | 2 +- .../key/OMKeyRenameRequestWithFSO.java | 2 +- .../ozone/om/request/key/OMKeyRequest.java | 2 +- .../om/request/key/OMKeySetTimesRequest.java | 2 +- .../key/OMKeySetTimesRequestWithFSO.java | 2 +- .../om/request/key/OMKeysDeleteRequest.java | 2 +- .../om/request/key/OMKeysRenameRequest.java | 2 +- .../request/key/OMOpenKeysDeleteRequest.java | 2 +- .../om/request/key/acl/OMKeyAclRequest.java | 2 +- .../key/acl/OMKeyAclRequestWithFSO.java | 2 +- .../key/acl/prefix/OMPrefixAclRequest.java | 2 +- ...S3ExpiredMultipartUploadsAbortRequest.java | 2 +- .../S3InitiateMultipartUploadRequest.java | 2 +- ...InitiateMultipartUploadRequestWithFSO.java | 2 +- .../S3MultipartUploadAbortRequest.java | 2 +- .../S3MultipartUploadCommitPartRequest.java | 2 +- .../S3MultipartUploadCompleteRequest.java | 2 +- .../tagging/S3DeleteObjectTaggingRequest.java | 2 +- .../S3DeleteObjectTaggingRequestWithFSO.java | 2 +- .../s3/tagging/S3PutObjectTaggingRequest.java | 2 +- .../S3PutObjectTaggingRequestWithFSO.java | 2 +- .../s3/tenant/OMTenantAssignAdminRequest.java | 2 +- .../OMTenantAssignUserAccessIdRequest.java | 2 +- .../s3/tenant/OMTenantCreateRequest.java | 4 +- .../s3/tenant/OMTenantDeleteRequest.java | 2 +- .../s3/tenant/OMTenantRevokeAdminRequest.java | 2 +- .../OMTenantRevokeUserAccessIdRequest.java | 2 +-
.../snapshot/OMSnapshotCreateRequest.java | 4 +- .../snapshot/OMSnapshotDeleteRequest.java | 4 +- .../snapshot/OMSnapshotRenameRequest.java | 4 +- .../request/volume/OMQuotaRepairRequest.java | 4 +- .../request/volume/OMVolumeCreateRequest.java | 4 +- .../request/volume/OMVolumeDeleteRequest.java | 4 +- .../volume/OMVolumeSetOwnerRequest.java | 2 +- .../volume/OMVolumeSetQuotaRequest.java | 2 +- .../volume/acl/OMVolumeAclRequest.java | 2 +- .../snapshot/OMSnapshotPurgeResponse.java | 2 +- .../om/service/SnapshotDeletingService.java | 2 +- .../ozone/om/snapshot/MultiSnapshotLocks.java | 2 +- .../ozone/om/snapshot/SnapshotCache.java | 2 +- .../om/snapshot/filter/ReclaimableFilter.java | 2 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 2 +- .../om/snapshot/TestMultiSnapshotLocks.java | 4 +- .../ozone/om/snapshot/TestSnapshotCache.java | 2 +- .../filter/AbstractReclaimableFilterTest.java | 2 +- 85 files changed, 347 insertions(+), 257 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java index d42df2acbd24..2efe66b9db85 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.S3_SECRET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.S3_SECRET_LOCK; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java new file mode 100644 index 000000000000..f23a6ee78f28 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Flat Resource defined in Ozone. Locks can be acquired on a resource independently of one another. + */ +public enum FlatResource implements Resource { + // Background services lock on a Snapshot.
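+ // (Taken, for example, by the snapshot defrag and snapshot deleting background services while they operate on a snapshot.)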
+ SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"), + // Lock acquired on a Snapshot's RocksDB Handle. + SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"); + + private String name; + private IOzoneManagerLock.ResourceManager resourceManager; + + FlatResource(String name) { + this.name = name; + this.resourceManager = new IOzoneManagerLock.ResourceManager(); + } + + @Override + public String getName() { + return name; + } + + @Override + public IOzoneManagerLock.ResourceManager getResourceManager() { + return resourceManager; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java new file mode 100644 index 000000000000..94eba94d5a80 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Interface for a hierarchical resource lock, where the order in which locks are acquired on resources is + * deterministic and there is no cyclic lock ordering across resources. + * Typically, this can be used for locking elements that form a DAG-like structure (e.g. the FSO tree, the + * snapshot chain, etc.). + */ +public interface HierachicalResourceLockManager { + + HierarchicalResourceLock acquireLock(Resource resource, String key); + + /** + * Interface for managing the lock lifecycle corresponding to a Hierarchical Resource. + */ + interface HierarchicalResourceLock extends AutoCloseable { boolean isLockAcquired(); + + @Override + void close(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index 7e8ed7c78171..6a17a0f69b16 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -72,19 +72,6 @@ boolean isWriteLockedByCurrentThread(Resource resource, OMLockMetrics getOMLockMetrics(); - /** - * Defines a resource interface used to represent entities that can be - * associated with locks in the Ozone Manager Lock mechanism. A resource - * implementation provides a name and an associated {@link ResourceManager} - * to manage its locking behavior. - */ - interface Resource { - - String getName(); - - ResourceManager getResourceManager(); - } - /** * The ResourceManager class provides functionality for managing * information about resource read and write lock usage.
It tracks the time of diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java new file mode 100644 index 000000000000..bb6b14e15882 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Leveled Resource defined in Ozone. + * Enforces lock acquisition ordering based on the resource level. A lock on a lower level resource cannot be + * acquired while a higher level lock is already held. + */ +public enum LeveledResource implements Resource { + // For S3 bucket we need to allow only the S3 bucket lock itself, which means only 1. + S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 + + // For volume we need to allow both s3 bucket and volume. 01 + 10 = 11 (3) + VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 + + // For bucket we need to allow both s3 bucket, volume and bucket. Which + // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 + BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 + + // For user we need to allow s3 bucket, volume, bucket and user lock. + // Which is 8 + 4 + 2 + 1 = 15 + USER_LOCK((byte) 3, "USER_LOCK"), // 15 + + S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 + KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 + PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 + SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 + + // This will tell the value, till which we can allow locking. + private short mask; + + // This value will help during setLock, and also will tell whether we can + // re-acquire lock or not. + private short setMask; + + // Name of the resource. + private String name; + + private IOzoneManagerLock.ResourceManager resourceManager; + + LeveledResource(byte pos, String name) { + // level of the resource + this.mask = (short) (Math.pow(2, pos + 1) - 1); + this.setMask = (short) Math.pow(2, pos); + this.name = name; + this.resourceManager = new IOzoneManagerLock.ResourceManager(); + } + + boolean canLock(short lockSetVal) { + + // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow + // re-acquiring locks from a single thread. The 2nd condition is: once one + // of these locks has been acquired, any further attempt to acquire a lock + // at a level less than or equal to it should be disallowed. + if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || + (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || + (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) + && setMask <= lockSetVal) { + return false; + } + + + // Our mask is the summation of bits of all previous possible locks. In + // other words it is the largest possible value for that bit position. + + // For example, for the volume lock the bit position is 1 and the mask is 3, + // which is the largest value that can be represented with 2 bits. + // Therefore if lockSet is larger than mask we have to return false, i.e. + // some other higher order lock has been acquired. + + return lockSetVal <= mask; + } + + /** + * Set Lock bits in lockSetVal. + * + * @param lockSetVal + * @return Updated value which has set lock bits. + */ + short setLock(short lockSetVal) { + return (short) (lockSetVal | setMask); + } + + /** + * Clear lock from lockSetVal. + * + * @param lockSetVal + * @return Updated value which has cleared lock bits. + */ + short clearLock(short lockSetVal) { + return (short) (lockSetVal & ~setMask); + } + + /** + * Return true, if this level is locked, else false. + * + * @param lockSetVal + */ + boolean isLevelLocked(short lockSetVal) { + return (lockSetVal & setMask) == setMask; + } + + @Override + public String getName() { + return name; + } + + @Override + public IOzoneManagerLock.ResourceManager getResourceManager() { + return resourceManager; + } + + short getMask() { + return mask; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 364322c3ae38..cd6d85c9e3b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -578,34 +578,6 @@ public OMLockMetrics getOMLockMetrics() { return omLockMetrics; } - /** - * Flat Resource defined in Ozone. Locks can be acquired on a resource independent of one another. - */ - public enum FlatResource implements Resource { - // Background services lock on a Snapshot. - SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"), - // Lock acquired on a Snapshot's RocksDB Handle. - SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"); - - private String name; - private ResourceManager resourceManager; - - FlatResource(String name) { - this.name = name; - this.resourceManager = new ResourceManager(); - } - - @Override - public String getName() { - return name; - } - - @Override - public ResourceManager getResourceManager() { - return resourceManager; - } - } - private abstract static class ResourceLockManager { private final ThreadLocal<OMLockDetails> omLockDetails = ThreadLocal.withInitial(OMLockDetails::new); @@ -690,119 +662,6 @@ public OMLockDetails lockResource(LeveledResource resource) { } } - /** - * Leveled Resource defined in Ozone. - * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired - after a higher level lock is already acquired. - */ - public enum LeveledResource implements Resource { - // For S3 Bucket need to allow only for S3, that should be means only 1. - S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 - - // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) - VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 - - // For bucket we need to allow both s3 bucket, volume and bucket. Which - // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 - BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 - - // For user we need to allow s3 bucket, volume, bucket and user lock.
- // Which is 8 4 + 2 + 1 = 15 - USER_LOCK((byte) 3, "USER_LOCK"), // 15 - - S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 - KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 - PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 - SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 - - // This will tell the value, till which we can allow locking. - private short mask; - - // This value will help during setLock, and also will tell whether we can - // re-acquire lock or not. - private short setMask; - - // Name of the resource. - private String name; - - private ResourceManager resourceManager; - - LeveledResource(byte pos, String name) { - // level of the resource - this.mask = (short) (Math.pow(2, pos + 1) - 1); - this.setMask = (short) Math.pow(2, pos); - this.name = name; - this.resourceManager = new ResourceManager(); - } - - boolean canLock(short lockSetVal) { - - // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow - // re-acquire locks from single thread. 2nd condition is we have - // acquired one of these locks, but after that trying to acquire a lock - // with less than equal of lockLevel, we should disallow. - if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || - (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || - (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) - && setMask <= lockSetVal) { - return false; - } - - - // Our mask is the summation of bits of all previous possible locks. In - // other words it is the largest possible value for that bit position. - - // For example for Volume lock, bit position is 1, and mask is 3. Which - // is the largest value that can be represented with 2 bits is 3. - // Therefore if lockSet is larger than mask we have to return false i.e - // some other higher order lock has been acquired. - - return lockSetVal <= mask; - } - - /** - * Set Lock bits in lockSetVal. - * - * @param lockSetVal - * @return Updated value which has set lock bits. - */ - short setLock(short lockSetVal) { - return (short) (lockSetVal | setMask); - } - - /** - * Clear lock from lockSetVal. - * - * @param lockSetVal - * @return Updated value which has cleared lock bits. - */ - short clearLock(short lockSetVal) { - return (short) (lockSetVal & ~setMask); - } - - /** - * Return true, if this level is locked, else false. - * @param lockSetVal - */ - boolean isLevelLocked(short lockSetVal) { - return (lockSetVal & setMask) == setMask; - } - - @Override - public String getName() { - return name; - } - - @Override - public ResourceManager getResourceManager() { - return resourceManager; - } - - short getMask() { - return mask; - } - } - /** * Update the processing details. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java new file mode 100644 index 000000000000..2ef2510f12d5 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Defines a resource interface used to represent entities that can be + * associated with locks in the Ozone Manager Lock mechanism. A resource + * implementation provides a name and an associated {@link IOzoneManagerLock.ResourceManager} + * to manage its locking behavior. + */ +public interface Resource { + + String getName(); + + IOzoneManagerLock.ResourceManager getResourceManager(); +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java index 53fdc659883a..c5d50ebdbd9a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java @@ -39,8 +39,8 @@ class TestKeyPathLock extends TestOzoneManagerLock { private static final Logger LOG = LoggerFactory.getLogger(TestKeyPathLock.class); - private final OzoneManagerLock.LeveledResource resource = - OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; + private final LeveledResource resource = + LeveledResource.KEY_PATH_LOCK; @Test void testKeyPathLockMultiThreading() throws Exception { @@ -224,8 +224,8 @@ private void testDiffKeyPathWriteLockMultiThreadingUtil( @Test void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -246,8 +246,8 @@ void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { @Test void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -268,8 +268,8 @@ void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -290,8 +290,8 @@ void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index a1d853eb6b39..652f586f69ea 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -35,9 +35,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; -import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java index f4c83fc08a5f..8d8bde304dc4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java @@ -23,7 +23,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index d6775b0495a9..d1c035130b9d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index e458fa73236a..00a8d4138ea4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -76,7 +76,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 426e7b73ec4f..b12b7ba8bcd7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 7b5fe844d6ae..28769f75409c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -27,7 +27,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 56e51cf4026e..ad77b921af9b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -94,8 +94,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PERMISSION_DENIED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import 
static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus.LEADER_AND_READY; import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.getRaftGroupIdFromOmServiceId; import static org.apache.hadoop.ozone.om.s3.S3SecretStoreConfigurationKeys.DEFAULT_SECRET_STORAGE_TYPE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index f6615b92f2d3..e7c70ecc808c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -23,7 +23,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 436593b861b6..9747bb7c8942 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DEFRAG_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DEFRAG_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index b94fd45bf7fb..522ea7df6de5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_SST_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_SST_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 0f1be7855788..812d5054b2e6 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java index c715856db80f..9aaddd1efc48 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.lock; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.KEY_PATH_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java index 76071a408b4a..b8116bc4c607 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.lock; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 838ee3be86ca..6f7e6975e295 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 4d802f9078e1..d1e0d7237885 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java index e60d5019ff41..4d57b22bed99 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 270b95d06da3..2e27c3db0b72 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index 565c6e4854d8..6c2a8987f2bc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket.acl; -import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 4e6ac64edcd2..2f6cf761a620 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 5adcfec9617c..f0f5b7aa1ea0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index b8812ddda99b..c0cdc5dc28ff 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.file; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static 
org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index d04c9f421e47..5392d28269cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.file; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index 5c96ae67fbe7..f05915bfc0a3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_SOFT_LIMIT_PERIOD; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RecoverLease; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 7c5660b93806..b172aa2318dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java index 799983cbe441..1d3e4bdb509d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import jakarta.annotation.Nonnull; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index ed6185141d6d..0ed6fa42ba04 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 31f1d9d71801..3902af8fd2d4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -22,7 +22,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index a23716d40d1c..cc86709a1a98 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index aa1338fa8cfa..b3a7e2bc547a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 5c2065356c0c..e7fdc0db0c16 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java index 75b5966e005e..809ef4c74f70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 5fe932c4a094..6134b3e40fbb 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index f1d71d99fdfe..00a5301d8434 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index 8163b902dbb5..b3098e518957 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.OmUtils.normalizeKey; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.RENAME_OPEN_FILE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 2317a4815910..9c9da2bd88ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -28,7 +28,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.util.Time.monotonicNow; import com.google.common.base.Preconditions; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 353a17757025..8444b1853ac3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java index 009bcd1662c1..23acba85f3de 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 427b2978f9c6..305e23ff5b4f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -26,7 +26,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST; import static org.apache.hadoop.ozone.OzoneConsts.VOLUME; import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java index ef4d64b27c9b..c2921323d636 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP; import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java index 3997f1a67782..5ac9c3c93d1e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 67a7f8a626b7..2dec9e910a60 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java index f32a22b17329..926ffdb694c0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 5e1f3513564a..334ee51b6e4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key.acl.prefix; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK; 
import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java index 5f7d01d9a733..29d7cfbfcc1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index ac27d0e0a173..fdd370c4bb8f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index 7e781635603b..dabec5043e7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 1e64edfb5be3..fbe219ecfcea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 5f715ded0b1b..5c57ce1e06d9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 6d41da38087c..5b8a286fee68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import jakarta.annotation.Nullable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java index 12c4ce13de5c..a884673d042d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java index b40e6c1f8a42..1e5e6bdac59d 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java index 23d7a40f26de..524f52355c9a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java index 05a45322c599..cbad899e2d75 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java index 08979e4e8090..2501dfd150e0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java index 
bb2c1a187856..84129c0039f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java index 3732f074bb0c..47432b1bb73e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java index bcb7e010bf8e..1baa18587474 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java index 469eed85e8bc..ae332c2e719e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java index 2458c8656256..7e814bac5a5e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 6211d4114005..37e67f89a2b8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import com.google.protobuf.ByteString; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index 3f8bae61c530..11e172040ffa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java index 7a4cdc640dce..ed2543def3d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java index 819edf6c01f5..c3ca72f71a15 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 1397f8a1b9f3..8889b9b0fa53 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.volume; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import 
java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index 943a5ea5700c..68ac2690085c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index f9a6fa303590..862577683909 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index b4279eac2d4d..d7092d0b2b49 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index 88d786cdd204..7181fa8eca89 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume.acl; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 267547bc1e54..3797b3fcf2eb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.response.snapshot; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import com.google.common.annotations.VisibleForTesting; import jakarta.annotation.Nonnull; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index 75e9a20cdf12..db44337ee411 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -22,7 +22,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index 525877306965..ec19cd94b549 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -26,8 +26,8 @@ import java.util.stream.Collectors; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.Resource; /** * Class to take multiple locks on multiple snapshots. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index 27c29b534495..6867f819b9c3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 5dc78e708fcb..7d227dfb641c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot.filter; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index 54087fa64dc1..05385ddc5bad 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getOmKeyInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java index 9c358a9261b3..cbff0398882b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -36,8 +36,8 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.LeveledResource; import 
org.apache.hadoop.ozone.om.lock.OMLockDetails; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -54,7 +54,7 @@ public class TestMultiSnapshotLocks { private IOzoneManagerLock mockLock; @Mock - private OzoneManagerLock.LeveledResource mockResource; + private LeveledResource mockResource; private MultiSnapshotLocks multiSnapshotLocks; private UUID obj1 = UUID.randomUUID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index 6fbc09eb89c8..9406d74c5ff6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java index 13ba79a77f82..ef97975ca8ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; From 24da3ebee1d9633995aba4fb7c27f9a903b71d15 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 12:20:44 -0400 Subject: [PATCH 036/126] HDDS-13797. Update interface Change-Id: I5ffd4cff6028b50c8d75ea9b3885c1e9818fe968 --- .../lock/HierachicalResourceLockManager.java | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java index 94eba94d5a80..0cc8df45e2c7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java @@ -17,22 +17,40 @@ package org.apache.hadoop.ozone.om.lock; +import java.io.Closeable; +import java.io.IOException; + /** * Interface for Hierachical Resource Lock where the lock order acquired on resource is going to be deterministic and * there is no cyclic lock ordering on resources. 
* Typically, this can be used for locking elements which form a DAG-like structure (e.g. FSO tree, Snapshot chain, etc.) */ -public interface HierachicalResourceLockManager { +public interface HierachicalResourceLockManager extends AutoCloseable { - HierarchicalResourceLock acquireLock(Resource resource, String key); + /** + * Acquires a read lock on the specified resource using the provided key. + * + * @param resource the resource on which the read lock is to be acquired + * @param key a unique identifier used for managing the lock + * @return a {@code HierarchicalResourceLock} interface to manage the lifecycle of the acquired lock + * @throws IOException if an I/O error occurs during the process of acquiring the lock + */ + HierarchicalResourceLock acquireReadLock(FlatResource resource, String key) throws IOException; + + /** + * Acquires a write lock on the specified resource using the provided key. + * + * @param resource the resource on which the write lock is to be acquired + * @param key a unique identifier used for managing the lock + * @return a {@code HierarchicalResourceLock} interface to manage the lifecycle of the acquired lock + * @throws IOException if an I/O error occurs during the process of acquiring the lock + */ + HierarchicalResourceLock acquireWriteLock(FlatResource resource, String key) throws IOException; /** * Interface for managing the lock lifecycle corresponding to a Hierarchical Resource. */ - interface HierarchicalResourceLock extends AutoCloseable { + interface HierarchicalResourceLock extends Closeable { boolean isLockAcquired(); - - @Override - void close(); } } From 8f3774a2045a27a8cc91adfbfb1178df977f7192 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 12:39:32 -0400 Subject: [PATCH 037/126] HDDS-13798. 
Implement PoolBasedHierarchicalResourceLockManager for Hierarchical Resource Change-Id: Iabeb0c8a90500ed9f6a57e232470d20f7c7251bf --- hadoop-ozone/common/pom.xml | 4 + .../apache/hadoop/ozone/om/OMConfigKeys.java | 8 + ...lBasedHierarchicalResourceLockManager.java | 204 +++++++ ...adOnlyHierarchicalResourceLockManager.java | 64 ++ ...lBasedHierarchicalResourceLockManager.java | 576 ++++++++++++++++++ .../hadoop/ozone/om/OMMetadataManager.java | 6 + .../ozone/om/OmMetadataManagerImpl.java | 13 + 7 files changed, 875 insertions(+) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index cb082b9d6c44..1ecafebb8b3f 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -77,6 +77,10 @@ <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-lang3</artifactId> </dependency> + <dependency> + <groupId>org.apache.commons</groupId> + <artifactId>commons-pool2</artifactId> + </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 969288ed92c8..254a49ea9a99 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -674,6 +674,14 @@ public final class OMConfigKeys { "ozone.om.snapshot.compact.non.snapshot.diff.tables"; public static final boolean OZONE_OM_SNAPSHOT_COMPACT_NON_SNAPSHOT_DIFF_TABLES_DEFAULT = false; + public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT = + "ozone.om.hierarchical.resource.locks.soft.limit"; + public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT = 1024; + + public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT = + "ozone.om.hierarchical.resource.locks.hard.limit"; + public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT = 10000; + /** * Never constructed. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java new file mode 100644 index 000000000000..19d2dacb32da --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.lock; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT; + +import com.google.common.base.Preconditions; +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; +import org.apache.commons.pool2.BasePooledObjectFactory; +import org.apache.commons.pool2.PooledObject; +import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +/** + * A lock manager implementation that manages hierarchical resource locks + * using a pool of reusable {@link ReadWriteLock} instances. The implementation + * ensures deterministic lock ordering for resources, avoiding cyclic + * lock dependencies, and is typically useful for structures like + * DAGs (e.g., File System trees or snapshot chains). + */ +public class PoolBasedHierarchicalResourceLockManager implements HierachicalResourceLockManager { + private final GenericObjectPool<ReadWriteLock> lockPool; + private final Map<FlatResource, Map<String, LockReferenceCountPair>> lockMap; + + public PoolBasedHierarchicalResourceLockManager(OzoneConfiguration conf) { + int softLimit = conf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT); + int hardLimit = conf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT); + GenericObjectPoolConfig<ReadWriteLock> config = new GenericObjectPoolConfig<>(); + config.setMaxIdle(softLimit); + config.setMaxTotal(hardLimit); + config.setBlockWhenExhausted(true); + this.lockPool = new GenericObjectPool<>(new ReadWriteLockFactory(), config); + this.lockMap = new ConcurrentHashMap<>(); + } + + private ReadWriteLock operateOnLock(FlatResource resource, String key, Consumer<LockReferenceCountPair> function) + throws IOException { + AtomicReference<IOException> exception = new AtomicReference<>(); + Map<String, LockReferenceCountPair> resourceLockMap = + this.lockMap.computeIfAbsent(resource, k -> new ConcurrentHashMap<>()); + LockReferenceCountPair lockRef = resourceLockMap.compute(key, (k, v) -> { + if (v == null) { + try { + ReadWriteLock readWriteLock = this.lockPool.borrowObject(); + v = new LockReferenceCountPair(readWriteLock); + } catch (Exception e) { + exception.set(new IOException("Exception while initializing lock object.", e)); + return null; + } + } + function.accept(v); + Preconditions.checkState(v.getCount() >= 0); + if (v.getCount() == 0) { + this.lockPool.returnObject(v.getLock()); + return null; + } + return v; + }); + if (exception.get() != null) { + throw exception.get(); + } + return lockRef == null ? null : lockRef.getLock(); + } + + @Override + public HierarchicalResourceLock acquireReadLock(FlatResource resource, String key) throws IOException { + return acquireLock(resource, key, true); + } + + @Override + public HierarchicalResourceLock acquireWriteLock(FlatResource resource, String key) throws IOException { + return acquireLock(resource, key, false); + } + + private HierarchicalResourceLock acquireLock(FlatResource resource, String key, boolean isReadLock) + throws IOException { + ReadWriteLock readWriteLock = operateOnLock(resource, key, LockReferenceCountPair::increment); + if (readWriteLock == null) { + throw new IOException("Unable to acquire " + (isReadLock ? "read" : "write") + " lock on resource " + + resource + " and key " + key); + } + return new PoolBasedHierarchicalResourceLock(resource, key, + isReadLock ? readWriteLock.readLock() : readWriteLock.writeLock()); + } + + @Override + public void close() { + this.lockPool.close(); + } + + /** + * Represents a hierarchical resource lock mechanism that operates + * using a resource pool for acquiring and releasing locks. This class + * provides thread-safe management of read and write locks associated + * with specific hierarchical resources. + * + * A lock can either be a read lock or a write lock. This is determined + * at the time of instantiation. The lifecycle of the lock is managed + * through this class, and the lock is automatically released when the + * `close` method is invoked. + * + * This is designed to work in conjunction with the containing manager + * class, {@code PoolBasedHierarchicalResourceLockManager}, which oversees + * the lifecycle of multiple such locks. + */ + public class PoolBasedHierarchicalResourceLock implements HierarchicalResourceLock, Closeable { + + private boolean isLockAcquired; + private final Lock lock; + private final FlatResource resource; + private final String key; + + public PoolBasedHierarchicalResourceLock(FlatResource resource, String key, Lock lock) { + this.isLockAcquired = true; + this.lock = lock; + this.resource = resource; + this.key = key; + this.lock.lock(); + } + + @Override + public boolean isLockAcquired() { + return isLockAcquired; + } + + @Override + public synchronized void close() throws IOException { + if (isLockAcquired) { + this.lock.unlock(); + operateOnLock(resource, key, (LockReferenceCountPair::decrement)); + isLockAcquired = false; + } + } + } + + private static final class LockReferenceCountPair { + private int count; + private ReadWriteLock lock; + + private LockReferenceCountPair(ReadWriteLock lock) { + this.count = 0; + this.lock = lock; + } + + private void increment() { + count++; + } + + private void decrement() { + count--; + } + + private int getCount() { + return count; + } + + private ReadWriteLock getLock() { + return lock; + } + } + + private static class ReadWriteLockFactory extends BasePooledObjectFactory<ReadWriteLock> { + + @Override + public ReadWriteLock create() throws Exception { + return new ReentrantReadWriteLock(); + } + + @Override + public PooledObject<ReadWriteLock> wrap(ReadWriteLock obj) { + return new DefaultPooledObject<>(obj); + } + } +}
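A minimal usage sketch of the manager above, using only what this patch introduces (the two OMConfigKeys entries and a FlatResource constant exercised by the tests below). Concurrent holders of the same (resource, key) pair share one pooled ReentrantReadWriteLock through the reference count, and the last close() returns it to the pool:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.lock.FlatResource;
    import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager.HierarchicalResourceLock;
    import org.apache.hadoop.ozone.om.lock.PoolBasedHierarchicalResourceLockManager;

    public final class PoolLockSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Illustrative limits: the soft limit feeds GenericObjectPoolConfig.setMaxIdle,
        // the hard limit feeds setMaxTotal; borrows block once the hard limit is reached.
        conf.setInt("ozone.om.hierarchical.resource.locks.soft.limit", 256);
        conf.setInt("ozone.om.hierarchical.resource.locks.hard.limit", 4096);
        try (PoolBasedHierarchicalResourceLockManager manager =
                 new PoolBasedHierarchicalResourceLockManager(conf)) {
          // Two readers on the same (resource, key) share one pooled lock; the count goes 0 -> 2.
          try (HierarchicalResourceLock r1 = manager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "snap-1");
               HierarchicalResourceLock r2 = manager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "snap-1")) {
            // read-side work
          } // count drops back to 0 and the ReadWriteLock is returned to the pool
          // A writer on the same key is exclusive against readers and other writers.
          try (HierarchicalResourceLock w = manager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, "snap-1")) {
            // write-side work
          }
        }
      }
    }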
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java new file mode 100644 index 000000000000..4bf2065a0bc6 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +import java.io.IOException; + +/** + * A read-only lock manager that does not acquire any locks. + */ +public class ReadOnlyHierarchicalResourceLockManager implements HierachicalResourceLockManager { + + private static final HierarchicalResourceLock EMPTY_LOCK_ACQUIRED = new HierarchicalResourceLock() { + @Override + public boolean isLockAcquired() { + return true; + } + + @Override + public void close() { + + } + }; + + private static final HierarchicalResourceLock EMPTY_LOCK_NOT_ACQUIRED = new HierarchicalResourceLock() { + @Override + public boolean isLockAcquired() { + return false; + } + + @Override + public void close() { + } + }; + + @Override + public HierarchicalResourceLock acquireReadLock(FlatResource resource, String key) throws IOException { + return EMPTY_LOCK_ACQUIRED; + } + + @Override + public HierarchicalResourceLock acquireWriteLock(FlatResource resource, String key) throws IOException { + return EMPTY_LOCK_NOT_ACQUIRED; + } + + @Override + public void close() throws Exception { + + } +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java new file mode 100644 index 000000000000..319bf8a4245c --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -0,0 +1,576 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.ozone.om.lock; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager.HierarchicalResourceLock; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.ValueSource; + +/** + * Test class for {@link PoolBasedHierarchicalResourceLockManager}. + * + * This class tests the functionality of the pool-based hierarchical resource lock manager, + * including basic lock operations, concurrency scenarios, resource pool management, + * and error conditions. + */ +public class TestPoolBasedHierarchicalResourceLockManager { + + private PoolBasedHierarchicalResourceLockManager lockManager; + private OzoneConfiguration conf; + + @BeforeEach + public void setUp() { + conf = new OzoneConfiguration(); + lockManager = new PoolBasedHierarchicalResourceLockManager(conf); + } + + @AfterEach + public void tearDown() { + if (lockManager != null) { + lockManager.close(); + } + } + + /** + * Test basic read lock acquisition and release. + */ + @Test + public void testBasicReadLockAcquisition() throws Exception { + String key = "test-key-1"; + + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test basic write lock acquisition and release. + */ + @Test + public void testBasicWriteLockAcquisition() throws Exception { + String key = "test-key-2"; + + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test multiple read locks can be acquired on the same resource. 
+ */ + @Test + public void testMultipleReadLocks() throws Exception { + String key = "test-key-3"; + + try (HierarchicalResourceLock lock1 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key); + HierarchicalResourceLock lock2 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + + assertNotNull(lock1); + assertNotNull(lock2); + assertTrue(lock1.isLockAcquired()); + assertTrue(lock2.isLockAcquired()); + } + } + + /** + * Test write lock exclusivity - only one write lock can be acquired at a time. + */ + @Test + @Timeout(10) + public void testWriteLockExclusivity() throws Exception { + String key = "test-key-4"; + CountDownLatch latch1 = new CountDownLatch(1); + CountDownLatch latch2 = new CountDownLatch(1); + AtomicBoolean secondLockAcquired = new AtomicBoolean(false); + + ExecutorService executor = Executors.newFixedThreadPool(2); + + try { + // First thread acquires write lock + CompletableFuture<Void> future1 = CompletableFuture.runAsync(() -> { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + latch1.countDown(); + // Hold lock for a short time + Thread.sleep(100); + } catch (Exception e) { + fail("First thread failed to acquire lock: " + e.getMessage()); + } + }, executor); + + // Wait for first lock to be acquired + latch1.await(); + + // Second thread tries to acquire write lock + CompletableFuture<Void> future2 = CompletableFuture.runAsync(() -> { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + secondLockAcquired.set(true); + latch2.countDown(); + } catch (Exception e) { + fail("Second thread failed to acquire lock: " + e.getMessage()); + } + }, executor); + + // Wait for both threads to complete + future1.get(5, TimeUnit.SECONDS); + future2.get(5, TimeUnit.SECONDS); + + // Second lock should have been acquired after first was released + assertTrue(secondLockAcquired.get()); + + } finally { + executor.shutdown(); + } + } + + /** + * Test read-write lock interaction - write lock blocks read locks. + */ + @Test + @Timeout(10) + public void testReadWriteLockInteraction() throws Exception { + String key = "test-key-5"; + CountDownLatch writeLockAcquired = new CountDownLatch(1); + CountDownLatch readLockAcquired = new CountDownLatch(1); + AtomicBoolean readLockBlocked = new AtomicBoolean(false); + + ExecutorService executor = Executors.newFixedThreadPool(2); + + try { + // First thread acquires write lock + CompletableFuture<Void> future1 = CompletableFuture.runAsync(() -> { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + writeLockAcquired.countDown(); + // Hold lock for a short time + Thread.sleep(200); + } catch (Exception e) { + fail("Write lock acquisition failed: " + e.getMessage()); + } + }, executor); + + // Wait for write lock to be acquired + writeLockAcquired.await(); + + // Second thread tries to acquire read lock + CompletableFuture<Void> future2 = CompletableFuture.runAsync(() -> { + try { + // This should block until write lock is released + readLockBlocked.set(true); + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + readLockAcquired.countDown(); + } + } catch (Exception e) { + fail("Read lock acquisition failed: " + e.getMessage()); + } + }, executor); + + // Wait for both threads to complete + future1.get(5, TimeUnit.SECONDS); + future2.get(5, TimeUnit.SECONDS); + + assertTrue(readLockBlocked.get()); + assertEquals(0, readLockAcquired.getCount()); + + } finally { + executor.shutdown(); + } + } + + /** + * Test lock state after closing. + */ + @Test + public void testLockStateAfterClose() throws Exception { + String key = "test-key-6"; + + HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, key); + assertTrue(lock.isLockAcquired()); + + lock.close(); + assertFalse(lock.isLockAcquired()); + } + + /** + * Test double close doesn't cause issues. + */ + @Test + public void testDoubleClose() throws Exception { + String key = "test-key-7"; + + HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key); + assertTrue(lock.isLockAcquired()); + + // First close + lock.close(); + assertFalse(lock.isLockAcquired()); + + // Second close should not throw exception + lock.close(); + assertFalse(lock.isLockAcquired()); + } + + /** + * Test different resource types can be locked independently. + */ + @ParameterizedTest + @EnumSource(FlatResource.class) + public void testDifferentResourceTypes(FlatResource resource) throws Exception { + String key = "test-key-" + resource.name(); + + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(resource, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + + /** + * Test different keys on same resource type can be locked concurrently. + */ + @Test + public void testDifferentKeysOnSameResource() throws Exception { + String key1 = "test-key-8a"; + String key2 = "test-key-8b"; + + try (HierarchicalResourceLock lock1 = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key1); + HierarchicalResourceLock lock2 = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key2)) { + + assertNotNull(lock1); + assertNotNull(lock2); + assertTrue(lock1.isLockAcquired()); + assertTrue(lock2.isLockAcquired()); + } + } + + /** + * Test configuration parameters are respected. + */ + @Test + public void testConfigurationParameters() { + OzoneConfiguration customConf = new OzoneConfiguration(); + customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 100); + customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, 500); + + try (PoolBasedHierarchicalResourceLockManager customLockManager = + new PoolBasedHierarchicalResourceLockManager(customConf)) { + + // Test that manager can be created with custom configuration + assertNotNull(customLockManager); + + // Basic functionality test with custom configuration + try (HierarchicalResourceLock lock = customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "test")) { + assertTrue(lock.isLockAcquired()); + } catch (Exception e) { + fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + } + } + } + + /** + * Test default configuration values. + */ + @Test + public void testDefaultConfiguration() { + OzoneConfiguration defaultConf = new OzoneConfiguration(); + + // Verify default values + assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT, + defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT)); + assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT, + defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT)); + + try (PoolBasedHierarchicalResourceLockManager defaultLockManager = + new PoolBasedHierarchicalResourceLockManager(defaultConf)) { + assertNotNull(defaultLockManager); + } + } + + /** + * Test concurrent access with multiple threads. + */ + @Test + @Timeout(30) + public void testConcurrentAccess() throws Exception { + int numThreads = 10; + int operationsPerThread = 50; + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + CountDownLatch latch = new CountDownLatch(numThreads); + AtomicInteger successCount = new AtomicInteger(0); + AtomicReference<Exception> exception = new AtomicReference<>(); + + try { + List<CompletableFuture<Void>> futures = new ArrayList<>(); + + for (int i = 0; i < numThreads; i++) { + final int threadId = i; + CompletableFuture<Void> future = CompletableFuture.runAsync(() -> { + try { + for (int j = 0; j < operationsPerThread; j++) { + String key = "thread-" + threadId + "-op-" + j; + FlatResource resource = FlatResource.values()[j % FlatResource.values().length]; + + // Randomly choose read or write lock + boolean isReadLock = (j % 2 == 0); + + try (HierarchicalResourceLock lock = isReadLock ? + lockManager.acquireReadLock(resource, key) : + lockManager.acquireWriteLock(resource, key)) { + + assertTrue(lock.isLockAcquired()); + + // Simulate some work + Thread.sleep(1); + + successCount.incrementAndGet(); + } + } + } catch (Exception e) { + exception.set(e); + } finally { + latch.countDown(); + } + }, executor); + + futures.add(future); + } + + // Wait for all threads to complete + latch.await(25, TimeUnit.SECONDS); + + // Check for exceptions + if (exception.get() != null) { + fail("Concurrent access test failed: " + exception.get().getMessage()); + } + + // Verify all operations succeeded + assertEquals(numThreads * operationsPerThread, successCount.get()); + + } finally { + executor.shutdown(); + } + } + + /** + * Test resource pool behavior under stress. + */ + @Test + @Timeout(20) + public void testResourcePoolStress() throws Exception { + // Use smaller pool limits for stress testing + OzoneConfiguration stressConf = new OzoneConfiguration(); + stressConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 10); + stressConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, 20); + + try (PoolBasedHierarchicalResourceLockManager stressLockManager = + new PoolBasedHierarchicalResourceLockManager(stressConf)) { + + int numThreads = 5; + int operationsPerThread = 20; + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + CountDownLatch latch = new CountDownLatch(numThreads); + AtomicInteger successCount = new AtomicInteger(0); + AtomicReference<Exception> exception = new AtomicReference<>(); + + try { + for (int i = 0; i < numThreads; i++) { + final int threadId = i; + executor.submit(() -> { + try { + for (int j = 0; j < operationsPerThread; j++) { + String key = "stress-" + threadId + "-" + j; + + try (HierarchicalResourceLock lock = + stressLockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + + assertTrue(lock.isLockAcquired()); + + // Hold lock for a bit to stress the pool + Thread.sleep(10); + + successCount.incrementAndGet(); + } + } + } catch (Exception e) { + exception.set(e); + } finally { + latch.countDown(); + } + }); + } + + // Wait for all threads to complete + latch.await(15, TimeUnit.SECONDS); + + // Check for exceptions + if (exception.get() != null) { + fail("Resource pool stress test failed: " + exception.get().getMessage()); + } + + // Verify all operations succeeded + assertEquals(numThreads * operationsPerThread, successCount.get()); + + } finally { + executor.shutdown(); + } + } + } + + /** + * Test manager close functionality. + */ + @Test + public void testManagerClose() throws Exception { + String key = "test-key-close"; + + // Acquire a lock + HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, key); + assertTrue(lock.isLockAcquired()); + + // Close the lock + lock.close(); + assertFalse(lock.isLockAcquired()); + + // Close the manager + lockManager.close(); + + // Manager should be closed gracefully + // Note: We don't test acquiring locks after manager close as behavior is undefined + } + + /** + * Test null key handling. + */ + @Test + public void testNullKey() { + assertThrows(NullPointerException.class, () -> { + lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, null); + }); + } + + /** + * Test null resource handling. + */ + @Test + public void testNullResource() { + assertThrows(NullPointerException.class, () -> { + lockManager.acquireWriteLock(null, "test-key"); + }); + } + + /** + * Test empty key handling. + */ + @Test + public void testEmptyKey() throws Exception { + // Empty key should be allowed + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, "")) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } +
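The configuration and stress tests above lean on the commons-pool2 semantics of the two limits: the soft limit becomes maxIdle (how many idle ReadWriteLock instances the pool retains for reuse) and the hard limit becomes maxTotal (how many may be in circulation at once), with blockWhenExhausted making the next borrow wait rather than fail. A stand-alone sketch of just that wiring, with illustrative numbers and a demo factory mirroring ReadWriteLockFactory:

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;
    import org.apache.commons.pool2.BasePooledObjectFactory;
    import org.apache.commons.pool2.PooledObject;
    import org.apache.commons.pool2.impl.DefaultPooledObject;
    import org.apache.commons.pool2.impl.GenericObjectPool;
    import org.apache.commons.pool2.impl.GenericObjectPoolConfig;

    final class DemoLockFactory extends BasePooledObjectFactory<ReadWriteLock> {
      @Override
      public ReadWriteLock create() {
        return new ReentrantReadWriteLock();
      }

      @Override
      public PooledObject<ReadWriteLock> wrap(ReadWriteLock lock) {
        return new DefaultPooledObject<>(lock);
      }

      public static void main(String[] args) throws Exception {
        GenericObjectPoolConfig<ReadWriteLock> cfg = new GenericObjectPoolConfig<>();
        cfg.setMaxIdle(10);              // soft limit: at most 10 idle locks kept around
        cfg.setMaxTotal(20);             // hard limit: at most 20 locks idle + checked out
        cfg.setBlockWhenExhausted(true); // the 21st concurrent borrow waits, it does not fail
        try (GenericObjectPool<ReadWriteLock> pool = new GenericObjectPool<>(new DemoLockFactory(), cfg)) {
          ReadWriteLock lock = pool.borrowObject();
          lock.writeLock().lock();
          // ... guarded work ...
          lock.writeLock().unlock();
          pool.returnObject(lock);       // beyond maxIdle, returned locks are destroyed, not cached
        }
      }
    }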
+ /** + * Test various key formats. + */ + @ParameterizedTest + @ValueSource(strings = {"simple", "key-with-dashes", "key_with_underscores", + "key.with.dots", "key/with/slashes", "123456789", + "key with spaces", "very-long-key-name-that-exceeds-normal-length-expectations"}) + public void testVariousKeyFormats(String key) throws Exception { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test reentrant lock behavior - same thread can acquire multiple locks on same resource. + */ + @Test + public void testReentrantLockBehavior() throws Exception { + String key = "reentrant-test"; + + // Acquire first lock + try (HierarchicalResourceLock lock1 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + assertTrue(lock1.isLockAcquired()); + + // Acquire second lock on same resource from same thread + try (HierarchicalResourceLock lock2 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + assertTrue(lock2.isLockAcquired()); + + // Both locks should be active + assertTrue(lock1.isLockAcquired()); + assertTrue(lock2.isLockAcquired()); + } + + // First lock should still be active after second is released + assertTrue(lock1.isLockAcquired()); + } + } + + /** + * Test that IOException is properly propagated from pool operations. + */ + @Test + public void testIOExceptionPropagation() { + // This test verifies that IOExceptions from pool operations are properly handled + // In normal circumstances, the pool should not throw IOExceptions during basic operations + // but the code should handle them gracefully if they occur + + String key = "exception-test"; + + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + // If we reach here, no IOException was thrown, which is expected for normal operation + } catch (Exception e) { + // If Exception is thrown, it should be properly propagated + assertNotNull(e.getMessage()); + } + } +} diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 5713f218bd5c..16fc941c9bd8 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -51,6 +51,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -90,6 +91,11 @@ public interface OMMetadataManager extends DBStoreHAManager, AutoCloseable { */ IOzoneManagerLock getLock(); + /** + * Returns the hierarchical resource lock manager used on the metadata DB. + */ + HierachicalResourceLockManager getHierarchicalLockManager(); + /** * Returns the epoch associated with current OM process.
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index ca9f45f8d24c..53d71837048c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -103,9 +103,12 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithMetadata; +import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.PoolBasedHierarchicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.ReadOnlyHierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; @@ -133,6 +136,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager, private DBStore store; private final IOzoneManagerLock lock; + private final HierachicalResourceLockManager hierarchicalLockManager; private TypedTable userTable; private TypedTable volumeTable; @@ -197,6 +201,7 @@ public OmMetadataManagerImpl(OzoneConfiguration conf, this.perfMetrics = this.ozoneManager.getPerfMetrics(); } this.lock = new OzoneManagerLock(conf); + this.hierarchicalLockManager = new PoolBasedHierarchicalResourceLockManager(conf); this.omEpoch = OmUtils.getOMEpoch(); start(conf); } @@ -207,6 +212,7 @@ public OmMetadataManagerImpl(OzoneConfiguration conf, protected OmMetadataManagerImpl() { OzoneConfiguration conf = new OzoneConfiguration(); this.lock = new OzoneManagerLock(conf); + this.hierarchicalLockManager = new PoolBasedHierarchicalResourceLockManager(conf); this.omEpoch = 0; perfMetrics = null; } @@ -239,6 +245,7 @@ public static OmMetadataManagerImpl createCheckpointMetadataManager( protected OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) throws IOException { lock = new OmReadOnlyLock(); + hierarchicalLockManager = new ReadOnlyHierarchicalResourceLockManager(); omEpoch = 0; int maxOpenFiles = conf.getInt(OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT); @@ -258,6 +265,7 @@ protected OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) OmMetadataManagerImpl(OzoneConfiguration conf, String snapshotDirName, int maxOpenFiles) throws IOException { try { lock = new OmReadOnlyLock(); + hierarchicalLockManager = new ReadOnlyHierarchicalResourceLockManager(); omEpoch = 0; String snapshotDir = OMStorage.getOmDbDir(conf) + OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR; @@ -644,6 +652,11 @@ public IOzoneManagerLock getLock() { return lock; } + @Override + public HierachicalResourceLockManager getHierarchicalLockManager() { + return hierarchicalLockManager; + } + @Override public long getOmEpoch() { return omEpoch; From 6865fad02439befe27bb4f1172c780ad0dc14be6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 15:38:02 -0400 Subject: [PATCH 038/126] HDDS-13797. 
Revert move of Leveled Resource and Resource enum/interface Change-Id: I9cfe0a545a5d4565b6a5e6fb94ea86f29d0f23ad --- .../ozone/om/S3SecretLockedManager.java | 2 +- .../hadoop/ozone/om/lock/FlatResource.java | 2 + .../ozone/om/lock/IOzoneManagerLock.java | 13 ++ .../hadoop/ozone/om/lock/LeveledResource.java | 132 ------------------ .../ozone/om/lock/OzoneManagerLock.java | 113 +++++++++++++++ .../apache/hadoop/ozone/om/lock/Resource.java | 31 ---- .../hadoop/ozone/om/lock/TestKeyPathLock.java | 20 +-- .../ozone/om/lock/TestOzoneManagerLock.java | 2 + .../hadoop/ozone/om/BucketManagerImpl.java | 2 +- .../hadoop/ozone/om/KeyManagerImpl.java | 2 +- .../apache/hadoop/ozone/om/ListIterator.java | 2 +- .../apache/hadoop/ozone/om/OzoneManager.java | 4 +- .../hadoop/ozone/om/PrefixManagerImpl.java | 2 +- .../hadoop/ozone/om/VolumeManagerImpl.java | 4 +- .../ozone/om/lock/OBSKeyPathLockStrategy.java | 4 +- .../om/lock/RegularBucketLockStrategy.java | 2 +- .../request/bucket/OMBucketCreateRequest.java | 4 +- .../request/bucket/OMBucketDeleteRequest.java | 4 +- .../bucket/OMBucketSetOwnerRequest.java | 2 +- .../bucket/OMBucketSetPropertyRequest.java | 2 +- .../bucket/acl/OMBucketAclRequest.java | 2 +- .../file/OMDirectoryCreateRequest.java | 2 +- .../file/OMDirectoryCreateRequestWithFSO.java | 2 +- .../om/request/file/OMFileCreateRequest.java | 2 +- .../file/OMFileCreateRequestWithFSO.java | 2 +- .../request/file/OMRecoverLeaseRequest.java | 2 +- .../request/key/OMAllocateBlockRequest.java | 2 +- .../key/OMAllocateBlockRequestWithFSO.java | 2 +- .../key/OMDirectoriesPurgeRequestWithFSO.java | 2 +- .../om/request/key/OMKeyCommitRequest.java | 2 +- .../key/OMKeyCommitRequestWithFSO.java | 2 +- .../key/OMKeyCreateRequestWithFSO.java | 2 +- .../om/request/key/OMKeyDeleteRequest.java | 2 +- .../key/OMKeyDeleteRequestWithFSO.java | 2 +- .../om/request/key/OMKeyPurgeRequest.java | 2 +- .../om/request/key/OMKeyRenameRequest.java | 2 +- .../key/OMKeyRenameRequestWithFSO.java | 2 +- .../ozone/om/request/key/OMKeyRequest.java | 2 +- .../om/request/key/OMKeySetTimesRequest.java | 2 +- .../key/OMKeySetTimesRequestWithFSO.java | 2 +- .../om/request/key/OMKeysDeleteRequest.java | 2 +- .../om/request/key/OMKeysRenameRequest.java | 2 +- .../request/key/OMOpenKeysDeleteRequest.java | 2 +- .../om/request/key/acl/OMKeyAclRequest.java | 2 +- .../key/acl/OMKeyAclRequestWithFSO.java | 2 +- .../key/acl/prefix/OMPrefixAclRequest.java | 2 +- ...S3ExpiredMultipartUploadsAbortRequest.java | 2 +- .../S3InitiateMultipartUploadRequest.java | 2 +- ...InitiateMultipartUploadRequestWithFSO.java | 2 +- .../S3MultipartUploadAbortRequest.java | 2 +- .../S3MultipartUploadCommitPartRequest.java | 2 +- .../S3MultipartUploadCompleteRequest.java | 2 +- .../tagging/S3DeleteObjectTaggingRequest.java | 2 +- .../S3DeleteObjectTaggingRequestWithFSO.java | 2 +- .../s3/tagging/S3PutObjectTaggingRequest.java | 2 +- .../S3PutObjectTaggingRequestWithFSO.java | 2 +- .../s3/tenant/OMTenantAssignAdminRequest.java | 2 +- .../OMTenantAssignUserAccessIdRequest.java | 2 +- .../s3/tenant/OMTenantCreateRequest.java | 4 +- .../s3/tenant/OMTenantDeleteRequest.java | 2 +- .../s3/tenant/OMTenantRevokeAdminRequest.java | 2 +- .../OMTenantRevokeUserAccessIdRequest.java | 2 +- .../snapshot/OMSnapshotCreateRequest.java | 4 +- .../snapshot/OMSnapshotDeleteRequest.java | 4 +- .../snapshot/OMSnapshotRenameRequest.java | 4 +- .../request/volume/OMQuotaRepairRequest.java | 4 +- .../request/volume/OMVolumeCreateRequest.java | 4 +- 
.../request/volume/OMVolumeDeleteRequest.java | 4 +- .../volume/OMVolumeSetOwnerRequest.java | 2 +- .../volume/OMVolumeSetQuotaRequest.java | 2 +- .../volume/acl/OMVolumeAclRequest.java | 2 +- .../ozone/om/snapshot/MultiSnapshotLocks.java | 2 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 2 +- .../om/snapshot/TestMultiSnapshotLocks.java | 4 +- 74 files changed, 220 insertions(+), 253 deletions(-) delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java index 2efe66b9db85..d42df2acbd24 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.S3_SECRET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.S3_SECRET_LOCK; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java index f23a6ee78f28..73f8357252f2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om.lock; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; + /** * Flat Resource defined in Ozone. Locks can be acquired on a resource independent of one another. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index 6a17a0f69b16..7e8ed7c78171 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -72,6 +72,19 @@ boolean isWriteLockedByCurrentThread(Resource resource, OMLockMetrics getOMLockMetrics(); + /** + * Defines a resource interface used to represent entities that can be + * associated with locks in the Ozone Manager Lock mechanism. A resource + * implementation provides a name and an associated {@link ResourceManager} + * to manage its locking behavior. + */ + interface Resource { + + String getName(); + + ResourceManager getResourceManager(); + } + /** * The ResourceManager class provides functionality for managing * information about resource read and write lock usage. It tracks the time of diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java deleted file mode 100644 index bb6b14e15882..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -/** - * Leveled Resource defined in Ozone. - * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired - * after a higher level lock is already acquired. - */ -public enum LeveledResource implements Resource { - // For S3 Bucket need to allow only for S3, that should be means only 1. - S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 - - // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) - VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 - - // For bucket we need to allow both s3 bucket, volume and bucket. Which - // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 - BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 - - // For user we need to allow s3 bucket, volume, bucket and user lock. - // Which is 8 4 + 2 + 1 = 15 - USER_LOCK((byte) 3, "USER_LOCK"), // 15 - - S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 - KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 - PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 - SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 - - // This will tell the value, till which we can allow locking. - private short mask; - - // This value will help during setLock, and also will tell whether we can - // re-acquire lock or not. - private short setMask; - - // Name of the resource. - private String name; - - private IOzoneManagerLock.ResourceManager resourceManager; - - LeveledResource(byte pos, String name) { - // level of the resource - this.mask = (short) (Math.pow(2, pos + 1) - 1); - this.setMask = (short) Math.pow(2, pos); - this.name = name; - this.resourceManager = new IOzoneManagerLock.ResourceManager(); - } - - boolean canLock(short lockSetVal) { - - // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow - // re-acquire locks from single thread. 2nd condition is we have - // acquired one of these locks, but after that trying to acquire a lock - // with less than equal of lockLevel, we should disallow. - if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || - (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || - (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) - && setMask <= lockSetVal) { - return false; - } - - - // Our mask is the summation of bits of all previous possible locks. In - // other words it is the largest possible value for that bit position. - - // For example for Volume lock, bit position is 1, and mask is 3. Which - // is the largest value that can be represented with 2 bits is 3. - // Therefore if lockSet is larger than mask we have to return false i.e - // some other higher order lock has been acquired. - - return lockSetVal <= mask; - } - - /** - * Set Lock bits in lockSetVal. - * - * @param lockSetVal - * @return Updated value which has set lock bits. 
- */ - short setLock(short lockSetVal) { - return (short) (lockSetVal | setMask); - } - - /** - * Clear lock from lockSetVal. - * - * @param lockSetVal - * @return Updated value which has cleared lock bits. - */ - short clearLock(short lockSetVal) { - return (short) (lockSetVal & ~setMask); - } - - /** - * Return true, if this level is locked, else false. - * - * @param lockSetVal - */ - boolean isLevelLocked(short lockSetVal) { - return (lockSetVal & setMask) == setMask; - } - - @Override - public String getName() { - return name; - } - - @Override - public IOzoneManagerLock.ResourceManager getResourceManager() { - return resourceManager; - } - - short getMask() { - return mask; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index cd6d85c9e3b2..6eb735d2ccc3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -662,6 +662,119 @@ public OMLockDetails lockResource(LeveledResource resource) { } } + /** + * Leveled Resource defined in Ozone. + * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired + * after a higher level lock is already acquired. + */ + public enum LeveledResource implements Resource { + // For S3 Bucket we need to allow only S3, that means only 1. + S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 + + // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) + VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 + + // For bucket we need to allow both s3 bucket, volume and bucket. Which + // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 + BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 + + // For user we need to allow s3 bucket, volume, bucket and user lock. + // Which is 8 + 4 + 2 + 1 = 15 + USER_LOCK((byte) 3, "USER_LOCK"), // 15 + + S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 + KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 + PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 + SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 + + // This will tell the value, till which we can allow locking. + private short mask; + + // This value will help during setLock, and also will tell whether we can + // re-acquire lock or not. + private short setMask; + + // Name of the resource. + private String name; + + private ResourceManager resourceManager; + + LeveledResource(byte pos, String name) { + // level of the resource + this.mask = (short) (Math.pow(2, pos + 1) - 1); + this.setMask = (short) Math.pow(2, pos); + this.name = name; + this.resourceManager = new ResourceManager(); + } + + boolean canLock(short lockSetVal) { + + // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow + // re-acquire locks from single thread. 2nd condition is we have + // acquired one of these locks, but after that trying to acquire a lock + // with less than equal of lockLevel, we should disallow. + if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || + (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || + (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) + && setMask <= lockSetVal) { + return false; + } + + + // Our mask is the summation of bits of all previous possible locks. In + // other words it is the largest possible value for that bit position. + + // For example, for the Volume lock, bit position is 1 and mask is 3, which + // is the largest value that can be represented with 2 bits. + // Therefore if lockSet is larger than mask we have to return false, i.e. + // some other higher-order lock has been acquired. + + return lockSetVal <= mask; + } + + /** + * Set Lock bits in lockSetVal. + * + * @param lockSetVal + * @return Updated value which has set lock bits. + */ + short setLock(short lockSetVal) { + return (short) (lockSetVal | setMask); + } + + /** + * Clear lock from lockSetVal. + * + * @param lockSetVal + * @return Updated value which has cleared lock bits. + */ + short clearLock(short lockSetVal) { + return (short) (lockSetVal & ~setMask); + } + + /** + * Return true, if this level is locked, else false. + * @param lockSetVal + */ + boolean isLevelLocked(short lockSetVal) { + return (lockSetVal & setMask) == setMask; + } + + @Override + public String getName() { + return name; + } + + @Override + public ResourceManager getResourceManager() { + return resourceManager; + } + + short getMask() { + return mask; + } + } + /** * Update the processing details. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java deleted file mode 100644 index 2ef2510f12d5..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -/** - * Defines a resource interface used to represent entities that can be - * associated with locks in the Ozone Manager Lock mechanism. A resource - * implementation provides a name and an associated {@link IOzoneManagerLock.ResourceManager} - * to manage its locking behavior.
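To make the canLock/setLock arithmetic above concrete, a small worked sketch with the enum's constants inlined (VOLUME_LOCK: setMask 2, mask 3; BUCKET_LOCK: setMask 4, mask 7):

    public final class LeveledResourceMaskDemo {
      public static void main(String[] args) {
        short lockSet = 0;                 // no locks held yet
        // canLock(VOLUME): 0 <= 3, so the volume lock may be taken.
        lockSet |= 2;                      // setLock(VOLUME)  -> lockSet = 2
        // canLock(BUCKET): 2 <= 7, so taking the bucket lock respects the ordering.
        lockSet |= 4;                      // setLock(BUCKET)  -> lockSet = 6
        // canLock(VOLUME) now fails: 6 > 3, i.e. a higher-level lock is already in the set.
        System.out.println(lockSet <= 3);  // false
        lockSet &= ~4;                     // clearLock(BUCKET) -> lockSet = 2
        System.out.println(lockSet <= 3);  // true again once the bucket lock is cleared
      }
    }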
- */ -public interface Resource { - - String getName(); - - IOzoneManagerLock.ResourceManager getResourceManager(); -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java index c5d50ebdbd9a..53fdc659883a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java @@ -39,8 +39,8 @@ class TestKeyPathLock extends TestOzoneManagerLock { private static final Logger LOG = LoggerFactory.getLogger(TestKeyPathLock.class); - private final LeveledResource resource = - LeveledResource.KEY_PATH_LOCK; + private final OzoneManagerLock.LeveledResource resource = + OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; @Test void testKeyPathLockMultiThreading() throws Exception { @@ -224,8 +224,8 @@ private void testDiffKeyPathWriteLockMultiThreadingUtil( @Test void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -246,8 +246,8 @@ void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { @Test void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -268,8 +268,8 @@ void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -290,8 +290,8 @@ void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 652f586f69ea..3486f44d753d 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index d1c035130b9d..d6775b0495a9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 00a8d4138ea4..e458fa73236a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -76,7 +76,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY;
 import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs;
 import static org.apache.hadoop.util.Time.monotonicNow;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
index b12b7ba8bcd7..426e7b73ec4f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.Closeable;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index ad77b921af9b..56e51cf4026e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -94,8 +94,8 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PERMISSION_DENIED;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus.LEADER_AND_READY;
 import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.getRaftGroupIdFromOmServiceId;
 import static org.apache.hadoop.ozone.om.s3.S3SecretStoreConfigurationKeys.DEFAULT_SECRET_STORAGE_TYPE;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
index e7c70ecc808c..f6615b92f2d3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java
@@ -23,7 +23,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK;
 import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX;
 
 import com.google.common.annotations.VisibleForTesting;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
index 812d5054b2e6..0f1be7855788 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java
index 9aaddd1efc48..c715856db80f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java
@@ -17,8 +17,8 @@
 package org.apache.hadoop.ozone.om.lock;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.KEY_PATH_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.KEY_PATH_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java
index b8116bc4c607..76071a408b4a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.lock;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index 6f7e6975e295..838ee3be86ca 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -21,8 +21,8 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
index d1e0d7237885..4d802f9078e1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java
@@ -19,8 +19,8 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java
index 4d57b22bed99..e60d5019ff41 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.bucket;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
index 2e27c3db0b72..270b95d06da3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.bucket;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
index 6c2a8987f2bc..565c6e4854d8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.bucket.acl;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
index 2f6cf761a620..4e6ac64edcd2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.file;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
index f0f5b7aa1ea0..5adcfec9617c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.file;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index c0cdc5dc28ff..b8812ddda99b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.file;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
index 5392d28269cd..d04c9f421e47 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.file;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
index f05915bfc0a3..5c96ae67fbe7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java
@@ -24,7 +24,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_SOFT_LIMIT_PERIOD;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RecoverLease;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
index b172aa2318dd..7c5660b93806 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
index 1d3e4bdb509d..799983cbe441 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import jakarta.annotation.Nonnull;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
index 0ed6fa42ba04..ed6185141d6d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf;
 import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 3902af8fd2d4..31f1d9d71801 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -22,7 +22,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index cc86709a1a98..a23716d40d1c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -20,7 +20,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
index b3a7e2bc547a..aa1338fa8cfa 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index e7fdc0db0c16..5c2065356c0c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
index 809ef4c74f70..75b5966e005e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java
@@ -20,7 +20,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
index 6134b3e40fbb..5fe932c4a094 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
 import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
index 00a5301d8434..f1d71d99fdfe 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
index b3098e518957..8163b902dbb5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -20,7 +20,7 @@
 import static org.apache.hadoop.ozone.OmUtils.normalizeKey;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.RENAME_OPEN_FILE;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index 9c9da2bd88ba..2317a4815910 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -28,7 +28,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java
index 8444b1853ac3..353a17757025 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java
index 23acba85f3de..009bcd1662c1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index 305e23ff5b4f..427b2978f9c6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -26,7 +26,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST;
 import static org.apache.hadoop.ozone.OzoneConsts.VOLUME;
 import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
index c2921323d636..ef4d64b27c9b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP;
 import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java
index 5ac9c3c93d1e..3997f1a67782 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.key;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
index 2dec9e910a60..67a7f8a626b7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
index 926ffdb694c0..f32a22b17329 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
index 334ee51b6e4a..5e1f3513564a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.key.acl.prefix;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java
index 29d7cfbfcc1f..5f7d01d9a733 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.util.ArrayList;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index fdd370c4bb8f..ac27d0e0a173 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index dabec5043e7e..7e781635603b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index fbe219ecfcea..1e64edfb5be3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 5c57ce1e06d9..5f715ded0b1b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index 5b8a286fee68..6d41da38087c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.s3.multipart;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import jakarta.annotation.Nullable;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java
index a884673d042d..12c4ce13de5c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tagging;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java
index 1e5e6bdac59d..b40e6c1f8a42 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tagging;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.util.Map;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java
index 524f52355c9a..23d7a40f26de 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tagging;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java
index cbad899e2d75..05a45322c599 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tagging;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
 
 import java.io.IOException;
 import java.util.Map;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java
index 2501dfd150e0..08979e4e8090 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tenant;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java
index 84129c0039f3..bb2c1a187856 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tenant;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java
index 47432b1bb73e..3732f074bb0c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java
@@ -20,8 +20,8 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
index 1baa18587474..bcb7e010bf8e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java
@@ -19,7 +19,7 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_EMPTY;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java
index ae332c2e719e..469eed85e8bc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tenant;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
index 7e814bac5a5e..2458c8656256 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.s3.tenant;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA;
 
 import com.google.common.base.Preconditions;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
index 37e67f89a2b8..6211d4114005 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java
@@ -20,8 +20,8 @@
 import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf;
 import static org.apache.hadoop.hdds.HddsUtils.toProtobuf;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT;
 
 import com.google.protobuf.ByteString;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java
index 11e172040ffa..3f8bae61c530 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om.request.snapshot;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java
index ed2543def3d0..7a4cdc640dce 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java
@@ -19,8 +19,8 @@
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK;
 import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT;
 
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java
index c3ca72f71a15..819edf6c01f5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java
@@ -19,8 +19,8 @@
 import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
index 8889b9b0fa53..1397f8a1b9f3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.ozone.om.request.volume;
 
 import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
index 68ac2690085c..943a5ea5700c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java
@@ -17,8 +17,8 @@
 package org.apache.hadoop.ozone.om.request.volume;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK;
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
index 862577683909..f9a6fa303590 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.volume;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
index d7092d0b2b49..b4279eac2d4d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.volume;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import com.google.common.base.Preconditions;
 import java.io.IOException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
index 7181fa8eca89..88d786cdd204 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.om.request.volume.acl;
 
-import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK;
 
 import java.io.IOException;
 import java.nio.file.InvalidPathException;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java
index ec19cd94b549..525877306965 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java
@@ -26,8 +26,8 @@
 import java.util.stream.Collectors;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
+import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource;
 import org.apache.hadoop.ozone.om.lock.OMLockDetails;
-import org.apache.hadoop.ozone.om.lock.Resource;
 
 /**
  * Class to take multiple locks on multiple snapshots.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index 05385ddc5bad..54087fa64dc1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getOmKeyInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java index cbff0398882b..9c358a9261b3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -36,8 +36,8 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.LeveledResource; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -54,7 +54,7 @@ public class TestMultiSnapshotLocks { private IOzoneManagerLock mockLock; @Mock - private LeveledResource mockResource; + private OzoneManagerLock.LeveledResource mockResource; private MultiSnapshotLocks multiSnapshotLocks; private UUID obj1 = UUID.randomUUID(); From 4711517be04886f67ea4b2a5e742c3786c9a2f20 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 18:24:38 -0400 Subject: [PATCH 039/126] HDDS-13798. 
Fix pmd findbugs Change-Id: I6953c4f0fab1b0b54e1b4f1fce69025fdd424ac9 --- .../TestPoolBasedHierarchicalResourceLockManager.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index 319bf8a4245c..ebe2c6034015 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -380,7 +380,7 @@ public void testConcurrentAccess() throws Exception { } // Wait for all threads to complete - latch.await(25, TimeUnit.SECONDS); + assertTrue(latch.await(25, TimeUnit.SECONDS)); // Check for exceptions if (exception.get() != null) { @@ -389,7 +389,9 @@ public void testConcurrentAccess() throws Exception { // Verify all operations succeeded assertEquals(numThreads * operationsPerThread, successCount.get()); - + for (CompletableFuture future : futures) { + future.get(); + } } finally { executor.shutdown(); } @@ -444,7 +446,7 @@ public void testResourcePoolStress() throws Exception { } // Wait for all threads to complete - latch.await(15, TimeUnit.SECONDS); + assertTrue(latch.await(15, TimeUnit.SECONDS)); // Check for exceptions if (exception.get() != null) { From 655a72442a6336c610402f1b68233abfef33e7ca Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 18:31:12 -0400 Subject: [PATCH 040/126] HDDS-13798. Fix pmd findbugs Change-Id: I31407994bb2d717f22977730b8b33ebc6eb33eea --- .../om/lock/TestPoolBasedHierarchicalResourceLockManager.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index ebe2c6034015..f589002cc089 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -58,11 +58,10 @@ public class TestPoolBasedHierarchicalResourceLockManager { private PoolBasedHierarchicalResourceLockManager lockManager; - private OzoneConfiguration conf; @BeforeEach public void setUp() { - conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); lockManager = new PoolBasedHierarchicalResourceLockManager(conf); } From 2bc61341cbbc45658990109059cbd5986d0c02c7 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 21:16:04 -0400 Subject: [PATCH 041/126] HDDS-13798. Fix ozone-default.xml Change-Id: I8b0d1657f809bf79cc5c5a64b336b7c6689aee91 --- .../common/src/main/resources/ozone-default.xml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index b200c0b5bf1f..462b1e4331f4 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -4838,4 +4838,14 @@ warm up edek cache if none of key successful on OM start up. 
+  <property>
+    <name>ozone.om.hierarchical.resource.locks.soft.limit</name>
+    <value>1024</value>
+    <description>Soft limit for number of lock objects that could be idle in the pool.</description>
+  </property>
+  <property>
+    <name>ozone.om.hierarchical.resource.locks.hard.limit</name>
+    <value>10000</value>
+    <description>Maximum number of lock objects that could be present in the pool.</description>
+  </property>

From 8e8c534e9ba8bb49cbc29318dac7d0710406c572 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 06:05:52 -0400
Subject: [PATCH 042/126] HDDS-13798. Stop lock data manager on metadata stop

Change-Id: I39115b2cb142fd36e370c8c5e72d6e58ce1ffb3a
---
 .../om/lock/ReadOnlyHierarchicalResourceLockManager.java  | 2 +-
 .../org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java
index 4bf2065a0bc6..59d61ca062d1 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java
@@ -39,7 +39,7 @@ public void close() {
   private static final HierarchicalResourceLock EMPTY_LOCK_NOT_ACQUIRED = new HierarchicalResourceLock() {
     @Override
     public boolean isLockAcquired() {
-      return true;
+      return false;
     }

     @Override
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 53d71837048c..b40905a328c1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -483,6 +483,11 @@ public void stop() throws IOException {
       store.close();
       store = null;
     }
+    try {
+      hierarchicalLockManager.close();
+    } catch (Exception e) {
+      LOG.error("Error closing hierarchical lock manager", e);
+    }
     tableCacheMetricsMap.values().forEach(TableCacheMetrics::unregister);
     // OzoneManagerLock cleanup
     lock.cleanup();

From f148f247a0780d93a49635c262e945cd696f137d Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 06:26:10 -0400
Subject: [PATCH 043/126] HDDS-13798.
Update tests Change-Id: Ie4b4b50c28aa5c62eeae51feec99304fec4b079e --- ...lBasedHierarchicalResourceLockManager.java | 59 ++++++++++--------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index f589002cc089..d0d3d75ade4a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -28,13 +28,16 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -288,7 +291,8 @@ public void testDifferentKeysOnSameResource() throws Exception { * Test configuration parameters are respected. */ @Test - public void testConfigurationParameters() { + public void testConfigurationParameters() + throws InterruptedException, IOException, ExecutionException, TimeoutException { OzoneConfiguration customConf = new OzoneConfiguration(); customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 100); customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, 500); @@ -297,35 +301,34 @@ public void testConfigurationParameters() { new PoolBasedHierarchicalResourceLockManager(customConf)) { // Test that manager can be created with custom configuration + List locks = new ArrayList<>(); assertNotNull(customLockManager); - - // Basic functionality test with custom configuration - try (HierarchicalResourceLock lock = customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "test")) { - assertTrue(lock.isLockAcquired()); - } catch (Exception e) { - fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + for (int i = 0; i < 500; i++) { + try { + locks.add(customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "test" + i)); + } catch (IOException e) { + fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + } + } + CountDownLatch latch = new CountDownLatch(1); + CompletableFuture future = CompletableFuture.runAsync(() -> { + // Basic functionality test with custom configuration + latch.countDown(); + try (HierarchicalResourceLock lock = customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, + "test" + 501)) { + assertTrue(lock.isLockAcquired()); + } catch (Exception e) { + fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + } + }); + Thread.sleep(1000); + latch.await(); + assertFalse(future.isDone()); + locks.get(0).close(); + future.get(5, TimeUnit.SECONDS); + for (HierarchicalResourceLock lock : locks) { + lock.close(); } - } - } - - /** - * Test default configuration values. 
- */ - @Test - public void testDefaultConfiguration() { - OzoneConfiguration defaultConf = new OzoneConfiguration(); - - // Verify default values - assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT, - defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, - OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT)); - assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT, - defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, - OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT)); - - try (PoolBasedHierarchicalResourceLockManager defaultLockManager = - new PoolBasedHierarchicalResourceLockManager(defaultConf)) { - assertNotNull(defaultLockManager); } } From da030c0fac84123ff7ebd15eb3edbaf6ba0e4aff Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 06:33:59 -0400 Subject: [PATCH 044/126] HDDS-13798. Rename class Change-Id: I4625b940c0fd242897d2e1a3ae299c001588be1b --- ...a => HierarchicalResourceLockManager.java} | 2 +- ...lBasedHierarchicalResourceLockManager.java | 2 +- ...adOnlyHierarchicalResourceLockManager.java | 2 +- ...lBasedHierarchicalResourceLockManager.java | 23 +++++++++++-------- .../hadoop/ozone/om/OMMetadataManager.java | 4 ++-- .../ozone/om/OmMetadataManagerImpl.java | 6 ++--- 6 files changed, 21 insertions(+), 18 deletions(-) rename hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/{HierachicalResourceLockManager.java => HierarchicalResourceLockManager.java} (97%) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierarchicalResourceLockManager.java similarity index 97% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierarchicalResourceLockManager.java index 0cc8df45e2c7..d34b199113c9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierarchicalResourceLockManager.java @@ -25,7 +25,7 @@ * there is no cyclic lock ordering on resources. * Typically, this can be used for locking elements which form a DAG like structure.(E.g. FSO tree, Snapshot chain etc.) */ -public interface HierachicalResourceLockManager extends AutoCloseable { +public interface HierarchicalResourceLockManager extends AutoCloseable { /** * Acquires a read lock on the specified resource using the provided key. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java index 19d2dacb32da..d601e31e6343 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java @@ -46,7 +46,7 @@ * lock dependencies, and is typically useful for structures like * DAGs (e.g., File System trees or snapshot chains). 
*/ -public class PoolBasedHierarchicalResourceLockManager implements HierachicalResourceLockManager { +public class PoolBasedHierarchicalResourceLockManager implements HierarchicalResourceLockManager { private final GenericObjectPool lockPool; private final Map> lockMap; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java index 59d61ca062d1..19e114ae52ec 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java @@ -22,7 +22,7 @@ /** * A read only lock manager that does not acquire any lock. */ -public class ReadOnlyHierarchicalResourceLockManager implements HierachicalResourceLockManager { +public class ReadOnlyHierarchicalResourceLockManager implements HierarchicalResourceLockManager { private static final HierarchicalResourceLock EMPTY_LOCK_ACQUIRED = new HierarchicalResourceLock() { @Override diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index d0d3d75ade4a..d9edd003504c 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -18,9 +18,7 @@ package org.apache.hadoop.ozone.om.lock; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -42,13 +40,12 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager.HierarchicalResourceLock; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.ValueSource; /** @@ -257,15 +254,21 @@ public void testDoubleClose() throws Exception { /** * Test different resource types can be locked independently. 
*/ - @ParameterizedTest - @EnumSource(FlatResource.class) - public void testDifferentResourceTypes(FlatResource resource) throws Exception { - String key = "test-key-" + resource.name(); + @Test + public void testDifferentResourceTypes() throws Exception { - try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(resource, key)) { + List locks = new ArrayList<>(); + for (FlatResource otherResource : FlatResource.values()) { + String key = "test-key"; + locks.add(lockManager.acquireWriteLock(otherResource, key)); + } + for (HierarchicalResourceLock lock : locks) { assertNotNull(lock); assertTrue(lock.isLockAcquired()); } + for (HierarchicalResourceLock lock : locks) { + lock.close(); + } } @@ -291,7 +294,7 @@ public void testDifferentKeysOnSameResource() throws Exception { * Test configuration parameters are respected. */ @Test - public void testConfigurationParameters() + public void testHardLimitsWithCustomConfiguration() throws InterruptedException, IOException, ExecutionException, TimeoutException { OzoneConfiguration customConf = new OzoneConfiguration(); customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 100); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 16fc941c9bd8..7a0872277341 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -51,7 +51,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -94,7 +94,7 @@ public interface OMMetadataManager extends DBStoreHAManager, AutoCloseable { /** * Returns the Hierarchical ResourceLock used on Metadata DB. */ - HierachicalResourceLockManager getHierarchicalLockManager(); + HierarchicalResourceLockManager getHierarchicalLockManager(); /** * Returns the epoch associated with current OM process. 
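With the rename in place, the metadata manager hands out a HierarchicalResourceLockManager whose locks are AutoCloseable and must be checked with isLockAcquired() (the read-only variant returns a no-op lock, and the earlier fix makes its not-acquired form report false). A minimal caller sketch, assuming acquireReadLock throws IOException when nothing can be acquired, as the tests above expect; SnapshotDbReadSketch, readWithLock, and snapshotKey are illustrative names:

import java.io.IOException;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.lock.FlatResource;
import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager;
import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock;

final class SnapshotDbReadSketch {
  // Guard a snapshot DB read with the pooled hierarchical lock manager.
  void readWithLock(OMMetadataManager metadataManager, String snapshotKey) throws IOException {
    HierarchicalResourceLockManager lockManager = metadataManager.getHierarchicalLockManager();
    try (HierarchicalResourceLock lock =
        lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, snapshotKey)) {
      if (!lock.isLockAcquired()) {
        // A read-only manager may hand back a lock that was never really taken.
        throw new IOException("Could not lock snapshot " + snapshotKey);
      }
      // ... read from the snapshot's RocksDB handle here ...
    } // close() releases the lock and lets the pool reclaim it
  }
}

try-with-resources matters here: the stop() change above closes the manager itself, but individual locks still have to be returned promptly or the pool's soft and hard limits fill up.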
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index b40905a328c1..c7b071a6e8d9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -103,7 +103,7 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithMetadata; -import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; @@ -136,7 +136,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager, private DBStore store; private final IOzoneManagerLock lock; - private final HierachicalResourceLockManager hierarchicalLockManager; + private final HierarchicalResourceLockManager hierarchicalLockManager; private TypedTable userTable; private TypedTable volumeTable; @@ -658,7 +658,7 @@ public IOzoneManagerLock getLock() { } @Override - public HierachicalResourceLockManager getHierarchicalLockManager() { + public HierarchicalResourceLockManager getHierarchicalLockManager() { return hierarchicalLockManager; } From b281569bbda234cbda4391f918b9c85cc68772c1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 09:21:21 -0400 Subject: [PATCH 045/126] HDDS-13783. Add tests Change-Id: I33b25ea6f8ddbad9a33d9420b91dbb72b28de1e7 --- .../hadoop/ozone/om/lock/FlatResource.java | 4 +- .../hadoop/ozone/om/OmSnapshotLocalData.java | 10 +- .../hadoop/ozone/om/OmSnapshotManager.java | 3 +- .../snapshot/OmSnapshotLocalDataManager.java | 202 ++++---- .../TestOmSnapshotLocalDataManager.java | 456 ++++++++++++++---- 5 files changed, 495 insertions(+), 180 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java index 73f8357252f2..f4d7e72ece3e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java @@ -26,7 +26,9 @@ public enum FlatResource implements Resource { // Background services lock on a Snapshot. SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"), // Lock acquired on a Snapshot's RocksDB Handle. - SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"); + SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"), + // Lock acquired on a Snapshot's Local Data. 
+ SNAPSHOT_LOCAL_DATA_LOCK("SNAPSHOT_LOCAL_DATA_LOCK"); private String name; private IOzoneManagerLock.ResourceManager resourceManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index d78430b6cae6..5de83927952c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -190,6 +190,10 @@ public void addVersionSSTFileInfos(List sstFiles, int previous .map(SstFileInfo::new).collect(Collectors.toList()))); } + public void removeVersionSSTFileInfos(int snapshotVersion) { + this.versionSstFileInfos.remove(snapshotVersion); + } + /** * Returns the checksum of the YAML representation. * @return checksum @@ -275,7 +279,7 @@ public OmSnapshotLocalData copyObject() { * maintain immutability. */ public static class VersionMeta implements CopyObject { - private final int previousSnapshotVersion; + private int previousSnapshotVersion; private final List sstFiles; public VersionMeta(int previousSnapshotVersion, List sstFiles) { @@ -287,6 +291,10 @@ public int getPreviousSnapshotVersion() { return previousSnapshotVersion; } + public void setPreviousSnapshotVersion(int previousSnapshotVersion) { + this.previousSnapshotVersion = previousSnapshotVersion; + } + public List getSstFiles() { return sstFiles; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 743c1e584e25..7b9beb80cf6f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -196,8 +196,7 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { - this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), - ozoneManager.getConfiguration()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 0f658348bfbe..5457c2649d3f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,19 +17,16 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX; import static 
org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; -import com.google.common.util.concurrent.Striped; import java.io.File; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collections; @@ -42,15 +39,11 @@ import java.util.Set; import java.util.Stack; import java.util.UUID; -import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.lang3.tuple.Triple; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -58,6 +51,9 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.FlatResource; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; @@ -84,10 +80,9 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { // Used for acquiring locks on the entire data structure. private final ReadWriteLock fullLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
- private Striped locks; + private HierarchicalResourceLockManager locks; - public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, - OzoneConfiguration configuration) throws IOException { + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; this.snapshotLocalDataSerializer = new YamlSerializer( @@ -100,7 +95,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO }; this.versionNodeMap = new HashMap<>(); this.fullLock = new ReentrantReadWriteLock(); - init(configuration); + init(); } @VisibleForTesting @@ -229,11 +224,8 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws } } - private void init(OzoneConfiguration configuration) throws IOException { - boolean fair = configuration.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); - String stripeSizeKey = OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX + SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME; - int size = configuration.getInt(stripeSizeKey, OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT); - this.locks = SimpleStriped.readWriteLock(size, fair); + private void init() throws IOException { + this.locks = omMetadataManager.getHierarchicalLockManager(); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); @@ -307,6 +299,37 @@ public void close() { } } + private static class LockDataProviderInitResult { + private final OmSnapshotLocalData snapshotLocalData; + private final HierarchicalResourceLock lock; + private final HierarchicalResourceLock previousLock; + private final UUID previousSnapshotId; + + private LockDataProviderInitResult(HierarchicalResourceLock lock, OmSnapshotLocalData snapshotLocalData, + HierarchicalResourceLock previousLock, UUID previousSnapshotId) { + this.lock = lock; + this.snapshotLocalData = snapshotLocalData; + this.previousLock = previousLock; + this.previousSnapshotId = previousSnapshotId; + } + + private HierarchicalResourceLock getLock() { + return lock; + } + + private HierarchicalResourceLock getPreviousLock() { + return previousLock; + } + + private UUID getPreviousSnapshotId() { + return previousSnapshotId; + } + + private OmSnapshotLocalData getSnapshotLocalData() { + return snapshotLocalData; + } + } + /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. 
@@ -341,35 +364,35 @@ public void close() { public class ReadableOmSnapshotLocalDataProvider implements AutoCloseable { private final UUID snapshotId; - private final Lock lock; + private final HierarchicalResourceLock lock; + private final HierarchicalResourceLock previousLock; private final OmSnapshotLocalData snapshotLocalData; - private final Lock previousLock; private OmSnapshotLocalData previousSnapshotLocalData; private volatile boolean isPreviousSnapshotLoaded = false; private final UUID resolvedPreviousSnapshotId; protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { - this(snapshotId, locks.get(snapshotId).readLock()); + this(snapshotId, true); } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapIdToResolve) throws IOException { - this(snapshotId, locks.get(snapshotId).readLock(), null, snapIdToResolve); + this(snapshotId, true, null, snapIdToResolve); } - protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock) throws IOException { - this(snapshotId, lock, null, null); + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock) throws IOException { + this(snapshotId, readLock, null, null); } - protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock, + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock, CheckedSupplier, IOException> snapshotLocalDataSupplier, UUID snapshotIdToBeResolved) throws IOException { this.snapshotId = snapshotId; - this.lock = lock; - Triple pair = initialize(lock, snapshotId, snapshotIdToBeResolved, + LockDataProviderInitResult result = initialize(readLock, snapshotId, snapshotIdToBeResolved, snapshotLocalDataSupplier); - this.snapshotLocalData = pair.getLeft(); - this.previousLock = pair.getMiddle(); - this.resolvedPreviousSnapshotId = pair.getRight(); + this.snapshotLocalData = result.getSnapshotLocalData(); + this.lock = result.getLock(); + this.previousLock = result.getPreviousLock(); + this.resolvedPreviousSnapshotId = result.getPreviousSnapshotId(); this.previousSnapshotLocalData = null; this.isPreviousSnapshotLoaded = false; } @@ -380,27 +403,35 @@ public OmSnapshotLocalData getSnapshotLocalData() { public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { if (!isPreviousSnapshotLoaded) { - File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); - this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : - snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + if (resolvedPreviousSnapshotId != null) { + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + } this.isPreviousSnapshotLoaded = true; } return previousSnapshotLocalData; } + private HierarchicalResourceLock acquireLock(UUID snapshotId, boolean readLock) throws IOException { + HierarchicalResourceLock acquiredLock = readLock ? 
locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, + snapshotId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapshotId.toString()); + if (!acquiredLock.isLockAcquired()) { + throw new IOException("Unable to acquire lock for snapshotId: " + snapshotId); + } + return acquiredLock; + } + /** * Initializes the snapshot local data by acquiring the lock on the snapshot and also acquiring a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ - private Triple<OmSnapshotLocalData, Lock, UUID> initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, + private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUID toResolveSnapshotId, CheckedSupplier<Pair<OmSnapshotLocalData, File>, IOException> snapshotLocalDataSupplier) throws IOException { - snapIdLock.lock(); - // Get the Lock instance for the snapshot id and track it. - ReadWriteLock lockIdAcquired = locks.get(snapId); - ReadWriteLock previousReadLockAcquired = null; - boolean haspreviousReadLockAcquiredAcquired = false; + HierarchicalResourceLock snapIdLock = null; + HierarchicalResourceLock previousReadLockAcquired = null; try { + snapIdLock = acquireLock(snapId, readLock); snapshotLocalDataSupplier = snapshotLocalDataSupplier == null ? () -> { File snapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(snapId)); return Pair.of(snapshotLocalDataSerializer.load(snapshotLocalDataFile), snapshotLocalDataFile); @@ -417,25 +448,18 @@ private Triple initialize(Lock snapIdLock, UUID // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); if (previousSnapshotId != null) { - if (versionNodeMap.containsKey(previousSnapshotId)) { + if (!versionNodeMap.containsKey(previousSnapshotId)) { throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); } toResolveSnapshotId = toResolveSnapshotId == null ? ssLocalData.getPreviousSnapshotId() : toResolveSnapshotId; - previousReadLockAcquired = locks.get(previousSnapshotId); - // Stripe lock could return the same lock object for multiple snapshotIds so in case a write lock is - // acquired previously on the same lock then this could cause a deadlock. If the same lock instance is - // returned then acquiring this read lock is unnecessary. - if (lockIdAcquired == previousReadLockAcquired) { - previousReadLockAcquired = null; - } - if (previousReadLockAcquired != null) { - previousReadLockAcquired.readLock().lock(); - haspreviousReadLockAcquiredAcquired = true; - } - Map<Integer, LocalDataVersionNode> previousVersionNodeMap = versionNodeMap.get(previousSnapshotId) - .getSnapshotVersions(); + previousReadLockAcquired = acquireLock(previousSnapshotId, true); + // Create a copy of the previous versionMap to get the previous versions corresponding to the previous + // snapshot. This map would be mutated to resolve the previous snapshot's version corresponding to the + // toResolveSnapshotId by iterating through the chain of previous snapshot ids. + Map<Integer, LocalDataVersionNode> previousVersionNodeMap = + new HashMap<>(versionNodeMap.get(previousSnapshotId).getSnapshotVersions()); UUID currentIteratedSnapshotId = previousSnapshotId; // Iterate through the chain of previous snapshot ids until the snapshot id to be resolved is found.
while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { @@ -454,25 +478,8 @@ private Triple initialize(Lock snapIdLock, UUID currentIteratedSnapshotId, snapId, toResolveSnapshotId)); } UUID previousId = previousIds.iterator().next(); - ReadWriteLock lockToBeAcquired = locks.get(previousId); - // If stripe lock returns the same lock object corresponding to snapshot id then no read lock needs to be - // acquired. - if (lockToBeAcquired == lockIdAcquired) { - lockToBeAcquired = null; - } - if (lockToBeAcquired != null) { - // If a read lock has already been acquired on the same lock based on the previous iteration snapshot id - // then no need to acquire another read lock on the same lock and this lock could just piggyback on the - // same lock. - if (lockToBeAcquired != previousReadLockAcquired) { - lockToBeAcquired.readLock().lock(); - haspreviousReadLockAcquiredAcquired = true; - } else { - // Set the previous read lock to null since the same lock instance is going to be used for current - // iteration lock as well. - previousReadLockAcquired = null; - } - } + HierarchicalResourceLock previousToPreviousReadLockAcquired = acquireLock(previousId, true); + try { // Get the version node for the snapshot and update the version node to the successor to point to the // previous node. @@ -486,15 +493,15 @@ private Triple initialize(Lock snapIdLock, UUID throw new IOException(String.format("Snapshot %s version %d doesn't have successor", currentIteratedSnapshotId, entry.getValue())); } + // Set the version node for the iterated version to the successor corresponding to the previous snapshot id. entry.setValue(versionNode.iterator().next()); } } finally { - // Release the read lock acquired on the previous snapshot id if it was acquired. Now that the instance + // Release the read lock acquired on the previous snapshot id. Now that the instance // is no longer needed we can release the read lock for the snapshot iterated in the previous snapshot. - if (previousReadLockAcquired != null) { - previousReadLockAcquired.readLock().unlock(); - } - previousReadLockAcquired = lockToBeAcquired; + // Make previousToPrevious the previous lock for the next iteration. + previousReadLockAcquired.close(); + previousReadLockAcquired = previousToPreviousReadLockAcquired; currentIteratedSnapshotId = previousId; } } @@ -502,6 +509,8 @@ private Triple initialize(Lock snapIdLock, UUID Map<Integer, OmSnapshotLocalData.VersionMeta> versionMetaMap = ssLocalData.getVersionSstFileInfos(); for (Map.Entry<Integer, OmSnapshotLocalData.VersionMeta> entry : versionMetaMap.entrySet()) { OmSnapshotLocalData.VersionMeta versionMeta = entry.getValue(); + // Get the relative version node, resolved against the toResolveSnapshotId, for the + // versionMeta which points to a particular version in the previous snapshot. LocalDataVersionNode relativePreviousVersionNode = previousVersionNodeMap.get(versionMeta.getPreviousSnapshotVersion()); if (relativePreviousVersionNode == null) { @@ -509,28 +518,33 @@ private Triple initialize(Lock snapIdLock, UUID " with version : %d against previous snapshot %s previous version : %d", snapId, entry.getKey(), toResolveSnapshotId, versionMeta.getPreviousSnapshotVersion())); } + // Set the previous snapshot version to the relativePreviousVersionNode which was captured. + versionMeta.setPreviousSnapshotVersion(relativePreviousVersionNode.getVersion()); } } else { toResolveSnapshotId = null; } - return Triple.of(ssLocalData, previousReadLockAcquired != null ?
previousReadLockAcquired.readLock() : null, - toResolveSnapshotId); + return new LockDataProviderInitResult(snapIdLock, ssLocalData, previousReadLockAcquired, toResolveSnapshotId); } catch (IOException e) { // Release all the locks in case of an exception and rethrow the exception. - if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { - previousReadLockAcquired.readLock().unlock(); + if (previousReadLockAcquired != null) { + previousReadLockAcquired.close(); + } + if (snapIdLock != null) { + snapIdLock.close(); } - snapIdLock.unlock(); throw e; } } @Override - public void close() { + public void close() throws IOException { if (previousLock != null) { - previousLock.unlock(); + previousLock.close(); + } + if (lock != null) { + lock.close(); } - lock.unlock(); } } @@ -552,18 +566,18 @@ public void close() { public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapshotLocalDataProvider { private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { - super(snapshotId, locks.get(snapshotId).writeLock()); + super(snapshotId, false); fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { - super(snapshotId, locks.get(snapshotId).writeLock(), null, snapshotIdToBeResolved); + super(snapshotId, false, null, snapshotIdToBeResolved); fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { - super(snapshotId, locks.get(snapshotId).writeLock(), snapshotLocalDataSupplier, null); + super(snapshotId, false, snapshotLocalDataSupplier, null); fullLock.readLock().lock(); } @@ -607,7 +621,9 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { List sstFiles = OmSnapshotManager.getSnapshotSSTFileList(snapshotStore); - this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, getPreviousSnapshotLocalData().getVersion()); + OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); + this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 
0 : + previousSnapshotLocalData.getVersion()); } public synchronized void commit() throws IOException { @@ -623,13 +639,13 @@ public synchronized void commit() throws IOException { throw new IOException("Unable to delete tmp file " + tmpFilePath); } snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); - FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, + Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); upsertNode(super.snapshotId, localDataVersionNodes); } @Override - public void close() { + public void close() throws IOException { super.close(); fullLock.readLock().unlock(); } @@ -652,6 +668,10 @@ private UUID getPreviousSnapshotId() { return previousSnapshotId; } + private int getVersion() { + return version; + } + @Override public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index e63a557ca83c..43b3f1838521 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -27,43 +27,51 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.doAnswer; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.reset; import static org.mockito.Mockito.when; -import com.google.common.util.concurrent.Striped; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import java.io.File; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.TreeMap; import java.util.UUID; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; +import org.apache.commons.compress.utils.Sets; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.FlatResource; +import 
org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; +import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -71,8 +79,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.mockito.Mock; -import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -83,12 +91,14 @@ public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; - - private static OzoneConfiguration conf; + private static List lockCapturor; @Mock private OMMetadataManager omMetadataManager; + @Mock + private HierarchicalResourceLockManager lockManager; + @Mock private RDBStore rdbStore; @@ -110,7 +120,6 @@ public class TestOmSnapshotLocalDataManager { @BeforeAll public static void setupClass() { - conf = new OzoneConfiguration(); snapshotLocalDataYamlSerializer = new YamlSerializer( new OmSnapshotLocalDataYaml.YamlFactory()) { @@ -119,10 +128,11 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO data.computeAndSetChecksum(yaml); } }; + lockCapturor = new ArrayList<>(); } @AfterAll - public static void teardownClass() throws IOException { + public static void teardownClass() { snapshotLocalDataYamlSerializer.close(); snapshotLocalDataYamlSerializer = null; } @@ -133,15 +143,15 @@ public void setUp() throws IOException { // Setup mock behavior when(omMetadataManager.getStore()).thenReturn(rdbStore); - + when(omMetadataManager.getHierarchicalLockManager()).thenReturn(lockManager); this.snapshotsDir = tempDir.resolve("snapshots").toFile(); FileUtils.deleteDirectory(snapshotsDir); assertTrue(snapshotsDir.exists() || snapshotsDir.mkdirs()); File dbLocation = tempDir.resolve("db").toFile(); FileUtils.deleteDirectory(dbLocation); assertTrue(dbLocation.exists() || dbLocation.mkdirs()); + mockLockManager(); - when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); } @@ -156,92 +166,368 @@ public void tearDown() throws Exception { } } - private String getReadLockMessageAcquire(int index) { - return READ_LOCK_MESSAGE_ACQUIRE + index; + private String getReadLockMessageAcquire(UUID snapshotId) { + return READ_LOCK_MESSAGE_ACQUIRE + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private String getReadLockMessageUnlock(int index) { - return READ_LOCK_MESSAGE_UNLOCK + index; + private String getReadLockMessageRelease(UUID snapshotId) { + return READ_LOCK_MESSAGE_UNLOCK + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private String getWriteLockMessageAcquire(int index) { - return WRITE_LOCK_MESSAGE_ACQUIRE + index; + private String getWriteLockMessageAcquire(UUID snapshotId) { + return WRITE_LOCK_MESSAGE_ACQUIRE + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private String 
getWriteLockMessageUnlock(int index) { - return WRITE_LOCK_MESSAGE_UNLOCK + index; + private String getWriteLockMessageRelease(UUID snapshotId) { + return WRITE_LOCK_MESSAGE_UNLOCK + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private MockedStatic mockStripedLock(Map lockMap, int numLocks, - List messageCaptorer) { - MockedStatic mockedStatic = mockStatic(SimpleStriped.class); - Striped stripedLock = mock(Striped.class); - - List readWriteLocks = new ArrayList<>(); - for (int idx = 0; idx < numLocks; idx++) { - final int lockIndex = idx; - ReadWriteLock readWriteLock = mock(ReadWriteLock.class); - Lock readLock = mock(Lock.class); - Lock writeLock = mock(Lock.class); - when(readWriteLock.readLock()).thenReturn(readLock); - when(readWriteLock.writeLock()).thenReturn(writeLock); - doAnswer(invocationOnMock -> { - messageCaptorer.add(getReadLockMessageAcquire(lockIndex)); - return null; - }).when(readLock).lock(); - doAnswer(invocationOnMock -> { - messageCaptorer.add(getReadLockMessageUnlock(lockIndex)); - return null; - }).when(readLock).unlock(); + private HierarchicalResourceLock getHierarchicalResourceLock(FlatResource resource, String key, boolean isWriteLock) { + return new HierarchicalResourceLock() { + @Override + public boolean isLockAcquired() { + return true; + } - doAnswer(invocationOnMock -> { - messageCaptorer.add(getWriteLockMessageAcquire(lockIndex)); - return null; - }).when(writeLock).lock(); - doAnswer(invocationOnMock -> { - messageCaptorer.add(getWriteLockMessageUnlock(lockIndex)); - return null; - }).when(writeLock).unlock(); - } - when(stripedLock.get(any())).thenAnswer(i -> { - if (lockMap.containsKey(i.getArgument(0))) { - return readWriteLocks.get(lockMap.get(i.getArgument(0))); + @Override + public void close() { + if (isWriteLock) { + lockCapturor.add(WRITE_LOCK_MESSAGE_UNLOCK + " " + resource + " " + key); + } else { + lockCapturor.add(READ_LOCK_MESSAGE_UNLOCK + " " + resource + " " + key); + } } - return readWriteLocks.get(0); - }); - mockedStatic.when(() -> SimpleStriped.readWriteLock(anyInt(), anyBoolean())).thenReturn(stripedLock); - return mockedStatic; + }; } - private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, - int numberOfSnapshots) { - List snapshotInfos = new ArrayList<>(); - SnapshotInfo previouseSnapshotInfo = null; + private void mockLockManager() throws IOException { + lockCapturor.clear(); + reset(lockManager); + when(lockManager.acquireReadLock(any(FlatResource.class), anyString())) + .thenAnswer(i -> { + lockCapturor.add(READ_LOCK_MESSAGE_ACQUIRE + " " + i.getArgument(0) + " " + i.getArgument(1)); + return getHierarchicalResourceLock(i.getArgument(0), i.getArgument(1), false); + }); + when(lockManager.acquireWriteLock(any(FlatResource.class), anyString())) + .thenAnswer(i -> { + lockCapturor.add(WRITE_LOCK_MESSAGE_ACQUIRE + " " + i.getArgument(0) + " " + i.getArgument(1)); + return getHierarchicalResourceLock(i.getArgument(0), i.getArgument(1), true); + }); + } + private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, + int numberOfSnapshots) throws IOException { + SnapshotInfo previousSnapshotInfo = null; + int counter = 0; + Map> liveFileMetaDataMap = new HashMap<>(); + liveFileMetaDataMap.put(KEY_TABLE, + Lists.newArrayList(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key2"))); + liveFileMetaDataMap.put(FILE_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst", FILE_TABLE, "key1", + "key2"))); + liveFileMetaDataMap.put(DIRECTORY_TABLE, 
Lists.newArrayList(createMockLiveFileMetaData("file2.sst", DIRECTORY_TABLE, "key1", + "key2"))); + liveFileMetaDataMap.put("col1", Lists.newArrayList(createMockLiveFileMetaData("file2.sst", "col1", "key1", + "key2"))); + List snapshotIds = new ArrayList<>(); for (int i = 0; i < numberOfSnapshots; i++) { - java.util.UUID snapshotId = java.util.UUID.randomUUID(); - SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, previouseSnapshotInfo == null ? null - : previouseSnapshotInfo.getSnapshotId()); - OmSnapshotLocalData localData = createMockLocalData(snapshotId, snapshotInfo.getPathPreviousSnapshotId()); - - snapshotInfos.add(snapshotInfo); - previouseSnapshotInfo = snapshotInfo; + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, previousSnapshotInfo == null ? null + : previousSnapshotInfo.getSnapshotId()); + mockSnapshotStore(snapshotId, liveFileMetaDataMap.values().stream() + .flatMap(Collection::stream).collect(Collectors.toList())); + localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); + previousSnapshotInfo = snapshotInfo; + for (String table : liveFileMetaDataMap.keySet()) { + liveFileMetaDataMap.get(table).add( + createMockLiveFileMetaData("file" + counter++ + ".sst", table, "key1", "key4")); + } + snapshotIds.add(snapshotId); } - return null; + return snapshotIds; + } + + private void mockSnapshotStore(UUID snapshotId, List sstFiles) throws RocksDatabaseException { + // Setup snapshot store mock + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); + + when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); + RocksDatabase rocksDatabase = mock(RocksDatabase.class); + when(snapshotStore.getDb()).thenReturn(rocksDatabase); + when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); } /** * Reading Snap1 against snap5 */ + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 20); + for (int start = 0; start < snapshotIds.size(); start++) { + for (int end = start + 1; end < snapshotIds.size(); end++) { + UUID startSnapshotId = snapshotIds.get(start); + UUID endSnapshotId = snapshotIds.get(end); + lockCapturor.clear(); + int logCaptorIdx = 0; + try (ReadableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = + read ? 
localDataManager.getOmSnapshotLocalData(endSnapshotId, startSnapshotId) : + localDataManager.getWritableOmSnapshotLocalData(endSnapshotId, startSnapshotId)) { + OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); + OmSnapshotLocalData previousSnapshot = omSnapshotLocalDataProvider.getPreviousSnapshotLocalData(); + assertEquals(startSnapshotId, previousSnapshot.getSnapshotId()); + assertEquals(endSnapshotId, snapshotLocalData.getSnapshotId()); + if (read) { + assertEquals(getReadLockMessageAcquire(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } else { + assertEquals(getWriteLockMessageAcquire(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } + int idx = end - 1; + UUID previousSnapId = snapshotIds.get(idx--); + assertEquals(getReadLockMessageAcquire(previousSnapId), lockCapturor.get(logCaptorIdx++)); + while (idx >= start) { + UUID prevPrevSnapId = snapshotIds.get(idx--); + assertEquals(getReadLockMessageAcquire(prevPrevSnapId), lockCapturor.get(logCaptorIdx++)); + assertEquals(getReadLockMessageRelease(previousSnapId), lockCapturor.get(logCaptorIdx++)); + previousSnapId = prevPrevSnapId; + } + } + assertEquals(getReadLockMessageRelease(startSnapshotId), lockCapturor.get(logCaptorIdx++)); + if (read) { + assertEquals(getReadLockMessageRelease(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } else { + assertEquals(getWriteLockMessageRelease(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } + assertEquals(lockCapturor.size(), logCaptorIdx); + } + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testVersionLockResolution(boolean read) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 5); + for (int snapIdx = 0; snapIdx < snapshotIds.size(); snapIdx++) { + UUID snapId = snapshotIds.get(snapIdx); + UUID expectedPreviousSnapId = snapIdx - 1 >= 0 ? snapshotIds.get(snapIdx - 1) : null; + lockCapturor.clear(); + int logCaptorIdx = 0; + try (ReadableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = + read ? localDataManager.getOmSnapshotLocalData(snapId) : + localDataManager.getWritableOmSnapshotLocalData(snapId)) { + OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); + OmSnapshotLocalData previousSnapshot = omSnapshotLocalDataProvider.getPreviousSnapshotLocalData(); + assertEquals(snapId, snapshotLocalData.getSnapshotId()); + assertEquals(expectedPreviousSnapId, previousSnapshot == null ? 
+            previousSnapshot.getSnapshotId());
+        if (read) {
+          assertEquals(getReadLockMessageAcquire(snapId), lockCapturor.get(logCaptorIdx++));
+        } else {
+          assertEquals(getWriteLockMessageAcquire(snapId), lockCapturor.get(logCaptorIdx++));
+        }
+        if (expectedPreviousSnapId != null) {
+          assertEquals(getReadLockMessageAcquire(expectedPreviousSnapId), lockCapturor.get(logCaptorIdx++));
+        }
+      }
+      if (expectedPreviousSnapId != null) {
+        assertEquals(getReadLockMessageRelease(expectedPreviousSnapId), lockCapturor.get(logCaptorIdx++));
+      }
+      if (read) {
+        assertEquals(getReadLockMessageRelease(snapId), lockCapturor.get(logCaptorIdx++));
+      } else {
+        assertEquals(getWriteLockMessageRelease(snapId), lockCapturor.get(logCaptorIdx++));
+      }
+      assertEquals(lockCapturor.size(), logCaptorIdx);
+    }
+  }
+
+  @Test
+  public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException {
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
+    UUID snapId = snapshotIds.get(1);
+    try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider =
+             localDataManager.getWritableOmSnapshotLocalData(snapId)) {
+      OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData();
+      snapshotLocalData.addVersionSSTFileInfos(Lists.newArrayList(createMockLiveFileMetaData("file1.sst", KEY_TABLE,
+          "key1", "key2")), 3);
+
+      IOException ex = assertThrows(IOException.class, omSnapshotLocalDataProvider::commit);
+      System.out.println(ex.getMessage());
+      assertTrue(ex.getMessage().contains("since previous snapshot with version hasn't been loaded"));
+    }
+  }
+
   @Test
-  public void testLockOrderingWithOverLappingLocks() {
+  public void testAddVersionFromRDB() throws IOException {
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
+    addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8));
+    UUID snapId = snapshotIds.get(1);
+    List<LiveFileMetaData> newVersionSstFiles =
+        Lists.newArrayList(createMockLiveFileMetaData("file5.sst", KEY_TABLE, "key1", "key2"),
+            createMockLiveFileMetaData("file6.sst", FILE_TABLE, "key1", "key2"),
+            createMockLiveFileMetaData("file7.sst", KEY_TABLE, "key1", "key2"),
+            createMockLiveFileMetaData("file1.sst", "col1", "key1", "key2"));
+    try (WritableOmSnapshotLocalDataProvider snap =
+             localDataManager.getWritableOmSnapshotLocalData(snapId)) {
+      mockSnapshotStore(snapId, newVersionSstFiles);
+      snap.addSnapshotVersion(snapshotStore);
+      snap.commit();
+    }
+    validateVersions(localDataManager, snapId, 1, Sets.newHashSet(0, 1));
+    try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapId)) {
+      OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+      OmSnapshotLocalData.VersionMeta versionMeta = snapshotLocalData.getVersionSstFileInfos().get(1);
+      assertEquals(6, versionMeta.getPreviousSnapshotVersion());
+      List<SstFileInfo> expectedLiveFileMetaData =
+          newVersionSstFiles.subList(0, 3).stream().map(SstFileInfo::new).collect(Collectors.toList());
+      assertEquals(expectedLiveFileMetaData, versionMeta.getSstFiles());
+    }
+
+  }
+
+  private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManager, UUID snapId, int expectedVersion,
+      Set<Integer> expectedVersions) throws IOException {
+    try (ReadableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getOmSnapshotLocalData(snapId)) {
+      assertEquals(expectedVersion, snap.getSnapshotLocalData().getVersion());
+      assertEquals(expectedVersions, snap.getSnapshotLocalData().getVersionSstFileInfos().keySet());
+    }
+  }
+
+  /**
+   * Validates write-time version propagation and removal rules when the previous
+   * snapshot already has a concrete version recorded.
+   *
+   * Test flow:
+   * 1) Create two snapshots in a chain: {@code prevSnapId -> snapId}.
+   * 2) For {@code prevSnapId}: set {@code version=3} and add SST metadata for version {@code 0}; commit.
+   * 3) For {@code snapId}: set {@code version=4} and add SST metadata for version {@code 4}; commit.
+   *    After commit, versions resolve to {@code prev.version=4} and {@code snap.version=5}, and their
+   *    version maps are {@code {0,4}} and {@code {0,5}} respectively (base version 0 plus the current one).
+   * 4) If {@code nextVersionExisting} is {@code true}:
+   *    - Attempt to remove version {@code 4} from {@code prevSnapId}; expect {@link IOException} because
+   *      the successor snapshot still exists at version {@code 5} and depends on {@code prevSnapId}.
+   *    - Validate that versions and version maps remain unchanged.
+   *    Else ({@code false}):
+   *    - Remove version {@code 5} from {@code snapId} and commit, then remove version {@code 4} from
+   *      {@code prevSnapId} and commit.
+   *    - Validate that both snapshots now only contain the base version {@code 0}.
+   *
+   * This ensures a snapshot cannot drop a version that still has a dependent successor, and that removals
+   * are allowed only after dependents are cleared.
+   *
+   * @param nextVersionExisting whether the successor snapshot's version still exists ({@code true}) or is
+   *                            removed first ({@code false})
+   * @throws IOException if commit validation fails as expected in the protected case
+   */
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException {
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
+    UUID prevSnapId = snapshotIds.get(0);
+    UUID snapId = snapshotIds.get(1);
+    addVersionsToLocalData(localDataManager, prevSnapId, ImmutableMap.of(4, 1));
+    addVersionsToLocalData(localDataManager, snapId, ImmutableMap.of(5, 4));
+
+    validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0, 5));
+    validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4));
+
+    if (nextVersionExisting) {
+      try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) {
+        prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4);
+        IOException ex = assertThrows(IOException.class, prevSnap::commit);
+        assertTrue(ex.getMessage().contains("Cannot remove Snapshot " + prevSnapId + " with version : 4 since it "
+            + "still has predecessors"));
+      }
+      validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0, 5));
+      validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4));
+    } else {
+      try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapId)) {
+        snap.getSnapshotLocalData().removeVersionSSTFileInfos(5);
+        snap.commit();
+      }
+
+      try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) {
+        prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4);
+        prevSnap.commit();
+      }
+      validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0));
+      validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0));
+    }
+  }
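The rule exercised by testWriteVersionValidation generalizes down a chain: a version can only be dropped after every successor version that resolves to it is gone. A minimal sketch of a cleanup call sequence built on the same provider API used in these tests (manager wiring assumed; IDs and version numbers hypothetical):

    // Child first, then parent: reversing the order makes the parent's
    // commit() fail validation, as asserted above.
    void dropDependentVersions(OmSnapshotLocalDataManager manager,
        UUID childId, int childVersion, UUID parentId, int parentVersion) throws IOException {
      try (WritableOmSnapshotLocalDataProvider child =
               manager.getWritableOmSnapshotLocalData(childId)) {
        child.getSnapshotLocalData().removeVersionSSTFileInfos(childVersion);
        child.commit();
      }
      try (WritableOmSnapshotLocalDataProvider parent =
               manager.getWritableOmSnapshotLocalData(parentId)) {
        parent.getSnapshotLocalData().removeVersionSSTFileInfos(parentVersion);
        parent.commit(); // succeeds: no successor resolves to parentVersion anymore
      }
    }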
+  private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalDataManager,
+      UUID snapId, Map<Integer, Integer> versionMap) throws IOException {
+    try (WritableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getWritableOmSnapshotLocalData(snapId)) {
+      OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+      for (Map.Entry<Integer, Integer> version : versionMap.entrySet().stream()
+          .sorted(Map.Entry.comparingByKey()).collect(Collectors.toList())) {
+        snapshotLocalData.setVersion(version.getKey() - 1);
+        snapshotLocalData.addVersionSSTFileInfos(ImmutableList.of(createMockLiveFileMetaData("file" + version
+            + ".sst", KEY_TABLE, "key1", "key2")), version.getValue());
+      }
+      snap.commit();
+    }
+    try (ReadableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getOmSnapshotLocalData(snapId)) {
+      OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+      for (int version : versionMap.keySet()) {
+        assertTrue(snapshotLocalData.getVersionSstFileInfos().containsKey(version));
+      }
+    }
+  }
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testVersionResolution(boolean read) throws IOException {
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 5);
+    List<Map<Integer, Integer>> versionMaps = Arrays.asList(
+        ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15),
+        ImmutableMap.of(5, 4, 6, 8, 10, 11),
+        ImmutableMap.of(1, 5, 3, 5, 8, 10),
+        ImmutableMap.of(1, 1, 2, 3, 5, 8),
+        ImmutableMap.of(1, 1, 11, 2, 20, 5, 30, 2)
+    );
+    for (int i = 0; i < snapshotIds.size(); i++) {
+      addVersionsToLocalData(localDataManager, snapshotIds.get(i), versionMaps.get(i));
+    }
+    for (int start = 0; start < snapshotIds.size(); start++) {
+      for (int end = start + 1; end < snapshotIds.size(); end++) {
+        UUID prevSnapId = snapshotIds.get(start);
+        UUID snapId = snapshotIds.get(end);
+        Map<Integer, Integer> versionMap = new HashMap<>(versionMaps.get(end));
+        versionMap.put(0, 0);
+        for (int idx = end - 1; idx > start; idx--) {
+          for (Map.Entry<Integer, Integer> version : versionMap.entrySet()) {
+            version.setValue(versionMaps.get(idx).getOrDefault(version.getValue(), 0));
+          }
+        }
+        try (ReadableOmSnapshotLocalDataProvider snap = read ?
+            localDataManager.getOmSnapshotLocalData(snapId, prevSnapId) :
+            localDataManager.getWritableOmSnapshotLocalData(snapId, prevSnapId)) {
+          OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+          OmSnapshotLocalData prevSnapshotLocalData = snap.getPreviousSnapshotLocalData();
+          assertEquals(prevSnapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId());
+          assertEquals(prevSnapId, snapshotLocalData.getPreviousSnapshotId());
+          assertEquals(snapId, snapshotLocalData.getSnapshotId());
+          assertTrue(snapshotLocalData.getVersionSstFileInfos().size() > 1);
+          snapshotLocalData.getVersionSstFileInfos()
+              .forEach((version, versionMeta) -> {
+                assertEquals(versionMap.get(version), versionMeta.getPreviousSnapshotVersion());
+              });
+        }
+      }
+    }
   }
+
   @Test
   public void testConstructor() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
     assertNotNull(localDataManager);
   }
@@ -250,7 +536,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce
     UUID snapshotId = UUID.randomUUID();
     SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
     File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo));
     assertNotNull(yamlPath);
@@ -281,7 +567,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException {
     RocksDatabase rocksDatabase = mock(RocksDatabase.class);
     when(snapshotStore.getDb()).thenReturn(rocksDatabase);
     when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles);
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);

     localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
@@ -304,7 +590,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException {
     // Create and write snapshot local data file
     OmSnapshotLocalData localData = createMockLocalData(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);

     // Write the file manually for testing
     Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId()));
@@ -325,7 +611,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept
     // Create local data with wrong snapshot ID
     OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
     Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId));
     writeLocalDataToFile(localData, yamlPath);
@@ -341,7 +627,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException {
     OmSnapshotLocalData localData = createMockLocalData(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
     Path yamlPath = tempDir.resolve("test-snapshot.yaml");
     writeLocalDataToFile(localData, yamlPath);
@@ -359,7 +645,7 @@ public void testAddVersionNodeWithDependents() throws IOException {
         .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList());
     UUID snapshotId = versionIds.get(0);
     UUID previousSnapshotId = versionIds.get(1);
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
     // Create snapshot directory structure and files
     createSnapshotLocalDataFile(snapshotId, previousSnapshotId);
     createSnapshotLocalDataFile(previousSnapshotId, null);
@@ -375,7 +661,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException {

     createSnapshotLocalDataFile(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);

     OmSnapshotLocalData localData = createMockLocalData(snapshotId, null);
@@ -397,7 +683,7 @@ public void testInitWithExistingYamlFiles() throws IOException {
     createSnapshotLocalDataFile(snapshotId, previousSnapshotId);

     // Initialize - should load existing files
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
     assertNotNull(localDataManager);

     Map versionMap =
@@ -417,13 +703,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException {

     // Should throw IOException during init
     assertThrows(IOException.class, () -> {
-      new OmSnapshotLocalDataManager(omMetadataManager, conf);
+      new OmSnapshotLocalDataManager(omMetadataManager);
     });
   }

   @Test
   public void testClose() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);

     // Should not throw exception
     localDataManager.close();
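The acquire/release order asserted by testLockOrderingAgainstAnotherSnapshot above is hand-over-hand lock coupling: the provider holds the target snapshot's lock, then walks the chain backwards, taking each predecessor's read lock before releasing the one in front of it. A minimal sketch of the pattern with plain JDK locks (ReadWriteLock standing in for HierarchicalResourceLock; the chain list is hypothetical):

    import java.util.List;
    import java.util.concurrent.locks.ReadWriteLock;

    final class LockCouplingSketch {
      // Walks chain[end - 1] down to chain[start], never holding more than two
      // chain locks at once; release order mirrors the lockCapturor assertions.
      static void walkChain(List<ReadWriteLock> chain, int start, int end) {
        ReadWriteLock held = chain.get(end - 1);
        held.readLock().lock();
        for (int idx = end - 2; idx >= start; idx--) {
          ReadWriteLock next = chain.get(idx);
          next.readLock().lock();   // acquire the older snapshot first
          held.readLock().unlock(); // only then drop the newer one
          held = next;
        }
        held.readLock().unlock();   // chain[start] is released last
      }
    }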
From 1ad24b4e3824fdce86e22fc79c243ff220360bb2 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 09:26:30 -0400
Subject: [PATCH 046/126] HDDS-13783. Fix checkstyle

Change-Id: I5d861cc0120cf89b43cdb961734931396736a27a
---
 .../snapshot/OmSnapshotLocalDataManager.java  |  9 ++++-----
 .../TestOmSnapshotLocalDataManager.java       | 19 ++++++++++---------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 5457c2649d3f..8f6ce69ad554 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -42,7 +42,6 @@
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -299,7 +298,7 @@ public void close() {
     }
   }

-  private static class LockDataProviderInitResult {
+  private static final class LockDataProviderInitResult {
     private final OmSnapshotLocalData snapshotLocalData;
     private final HierarchicalResourceLock lock;
     private final HierarchicalResourceLock previousLock;
@@ -412,11 +411,11 @@ public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IO
       return previousSnapshotLocalData;
     }

-    private HierarchicalResourceLock acquireLock(UUID snapshotId, boolean readLock) throws IOException {
+    private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) throws IOException {
       HierarchicalResourceLock acquiredLock = readLock ? locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK,
-          snapshotId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapshotId.toString());
+          snapId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapId.toString());
       if (!acquiredLock.isLockAcquired()) {
-        throw new IOException("Unable to acquire lock for snapshotId: " + snapshotId);
+        throw new IOException("Unable to acquire lock for snapshotId: " + snapId);
       }
       return acquiredLock;
     }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
index 43b3f1838521..d273e758c3f1 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
@@ -41,14 +41,12 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.UUID;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -215,7 +213,7 @@ private void mockLockManager() throws IOException {
     });
   }

-  private List<UUID> createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager,
+  private List<UUID> createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLocalDataManager,
       int numberOfSnapshots) throws IOException {
     SnapshotInfo previousSnapshotInfo = null;
     int counter = 0;
@@ -224,8 +222,8 @@ private List<UUID> createSnapshotLocalData(OmSnapshotLocalDataManager localDataM
         Lists.newArrayList(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key2")));
     liveFileMetaDataMap.put(FILE_TABLE,
         Lists.newArrayList(createMockLiveFileMetaData("file2.sst", FILE_TABLE, "key1", "key2")));
-    liveFileMetaDataMap.put(DIRECTORY_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst", DIRECTORY_TABLE, "key1",
-        "key2")));
+    liveFileMetaDataMap.put(DIRECTORY_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst",
+        DIRECTORY_TABLE, "key1", "key2")));
     liveFileMetaDataMap.put("col1", Lists.newArrayList(createMockLiveFileMetaData("file2.sst", "col1", "key1",
         "key2")));
     List<UUID> snapshotIds = new ArrayList<>();
@@ -235,7 +233,7 @@ private List<UUID> createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLo
         .flatMap(Collection::stream).collect(Collectors.toList()));
-    localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
+    snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
     previousSnapshotInfo = snapshotInfo;
     for (String table : liveFileMetaDataMap.keySet()) {
       liveFileMetaDataMap.get(table).add(
@@ -258,7 +256,12 @@ private void mockSnapshotStore(UUID snapshotId, List<LiveFileMetaData> sstFiles)
   }

   /**
-   * Reading Snap1 against snap5
+   * Checks the order in which locks are taken while reading a snapshot against a previous snapshot.
+   * Depending on the read flag, a read or write lock is acquired on the snapshotId, and a read lock is acquired
+   * on the previous snapshot. Once the instance is closed, the read lock on the previous snapshot is released,
+   * followed by the lock on the snapshotId.
+   * @param read whether to acquire a read lock (true) or a write lock (false) on the snapshot
+   * @throws IOException
    */
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException {
@@ -386,7 +389,6 @@ public void testAddVersionFromRDB() throws IOException {
           newVersionSstFiles.subList(0, 3).stream().map(SstFileInfo::new).collect(Collectors.toList());
       assertEquals(expectedLiveFileMetaData, versionMeta.getSstFiles());
     }
-
   }

   private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManager, UUID snapId, int expectedVersion,
@@ -524,7 +526,6 @@ public void testVersionResolution(boolean read) throws IOException {
     }
   }

-
   @Test
   public void testConstructor() throws IOException {
     localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);

From d62991103960df7deb4fe1eab9d303be13a64deb Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 09:39:11 -0400
Subject: [PATCH 047/126] HDDS-13783. Fix findbugs

Change-Id: Idf069e026ada0f57f664c64299a856809fd63344
---
 .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
index d273e758c3f1..dcea25465949 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
@@ -235,9 +235,9 @@ private List<UUID> createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLo
         .flatMap(Collection::stream).collect(Collectors.toList()));
     snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
     previousSnapshotInfo = snapshotInfo;
-    for (String table : liveFileMetaDataMap.keySet()) {
-      liveFileMetaDataMap.get(table).add(
-          createMockLiveFileMetaData("file" + counter++ + ".sst", table, "key1", "key4"));
+    for (Map.Entry<String, List<LiveFileMetaData>> tableEntry : liveFileMetaDataMap.entrySet()) {
+      String table = tableEntry.getKey();
+      tableEntry.getValue().add(createMockLiveFileMetaData("file" + counter++ + ".sst", table, "key1", "key4"));
     }
     snapshotIds.add(snapshotId);
   }
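The findbugs complaint fixed above is the classic inefficient-map-iterator pattern (WMI_WRONG_MAP_ITERATOR): iterating keySet() and calling get() per key performs a redundant lookup on every pass. The shape of the rewrite in isolation, with toy types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class EntrySetIteration {
      static void appendMarker(Map<String, List<String>> filesByTable) {
        // Before: for (String table : filesByTable.keySet()) {
        //           filesByTable.get(table).add(...);  // second hash lookup per key
        //         }
        for (Map.Entry<String, List<String>> entry : filesByTable.entrySet()) {
          entry.getValue().add(entry.getKey() + "-extra.sst"); // key and value in one pass
        }
      }

      public static void main(String[] args) {
        Map<String, List<String>> m = new HashMap<>();
        m.put("keyTable", new ArrayList<>(List.of("file1.sst")));
        appendMarker(m);
        System.out.println(m);
      }
    }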
From 8eeb44bcddb7e23a6da5e8bf831732d9217913c1 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 10:28:07 -0400
Subject: [PATCH 048/126] HDDS-13783. Fix pmd

Change-Id: Ib0066799f77554b0d27cb53da89e8f542c9c308a
---
 .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 8f6ce69ad554..05287286bca3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -70,7 +70,6 @@ public class OmSnapshotLocalDataManager implements AutoCloseable {

   private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class);
-  private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock";

   private final ObjectSerializer snapshotLocalDataSerializer;
   private final MutableGraph<LocalDataVersionNode> localDataGraph;

From 06e7d373f9837ca012704b6e1389af814dee06ca Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 14:23:29 -0400
Subject: [PATCH 049/126] HDDS-13785. Fix merge issue

Change-Id: I8712f187b7f6f115feac7ce0a203eab32d091529
---
 .../hadoop/ozone/om/OmSnapshotLocalData.java  |  4 --
 .../snapshot/OmSnapshotLocalDataManager.java  | 65 +++++++++++--------
 .../TestOmSnapshotLocalDataManager.java       | 12 ++--
 3 files changed, 43 insertions(+), 38 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java
index b8dab82e9315..5de83927952c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java
@@ -194,10 +194,6 @@ public void removeVersionSSTFileInfos(int snapshotVersion) {
     this.versionSstFileInfos.remove(snapshotVersion);
   }

-  public void removeVersionSSTFileInfos(int snapshotVersion) {
-    this.versionSstFileInfos.remove(snapshotVersion);
-  }
-
   /**
    * Returns the checksum of the YAML representation.
    * @return checksum
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 9d3393c20585..9fbaa66d9383 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -79,7 +79,6 @@ public class OmSnapshotLocalDataManager implements AutoCloseable {

   private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class);
-  private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock";
   private static final String LOCAL_DATA_MANAGER_SERVICE_NAME = "OmSnapshotLocalDataManagerService";

   private final ObjectSerializer snapshotLocalDataSerializer;
@@ -255,7 +254,7 @@ private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) {
     });
   }

-  private void init() throws IOException {
+  private void init(OzoneConfiguration configuration, SnapshotChainManager chainManager) throws IOException {
     this.locks = omMetadataManager.getHierarchicalLockManager();
     this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>();
     RDBStore store = (RDBStore) omMetadataManager.getStore();
@@ -281,18 +280,21 @@
     for (UUID snapshotId : versionNodeMap.keySet()) {
       increamentOrphanCheckCount(snapshotId);
     }
-    this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1);
     long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration(
         OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT,
         TimeUnit.MILLISECONDS);
-    this.scheduler.scheduleWithFixedDelay(
-        () -> {
-          try {
-            checkOrphanSnapshotVersions(omMetadataManager, snapshotChainManager);
-          } catch (IOException e) {
-            LOG.error("Exception while checking orphan snapshot versions", e);
-          }
-        }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS);
+    if (snapshotLocalDataManagerServiceInterval > 0) {
+      this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1);
+      this.scheduler.scheduleWithFixedDelay(
+          () -> {
+            try {
+              checkOrphanSnapshotVersions(omMetadataManager, chainManager);
+            } catch (IOException e) {
+              LOG.error("Exception while checking orphan snapshot versions", e);
+            }
+          }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS);
+    }
+
   }

   private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager)
@@ -300,25 +302,32 @@
     for (Map.Entry<UUID, Integer> entry : snapshotToBeCheckedForOrphans.entrySet()) {
       UUID snapshotId = entry.getKey();
       int countBeforeCheck = entry.getValue();
-      try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider =
-          new WritableOmSnapshotLocalDataProvider(snapshotId)) {
-        OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData();
-        boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId);
-        for (Map.Entry<Integer, LocalDataVersionNode> integerLocalDataVersionNodeEntry : getVersionNodeMap().get(
-            snapshotId).getSnapshotVersions().entrySet()) {
-          LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue();
-          // remove the version entry if it is not referenced by any other snapshot version node. For version node 0
-          // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0
-          // node can be only deleted if the snapshot is also purged.
-          boolean toRemove = localDataGraph.inDegree(versionEntry) == 0
-              && (versionEntry.getVersion() != 0 || isSnapshotPurged);
-          if (toRemove) {
-            snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion());
-          }
+      checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId);
+      decreamentOrphanCheckCount(snapshotId, countBeforeCheck);
+    }
+  }
+
+  @VisibleForTesting
+  void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager,
+      UUID snapshotId) throws IOException {
+    try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider(
+        snapshotId)) {
+      OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData();
+      boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId);
+      for (Map.Entry<Integer, LocalDataVersionNode> integerLocalDataVersionNodeEntry : getVersionNodeMap()
+          .get(snapshotId).getSnapshotVersions().entrySet()) {
+        LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue();
+        // remove the version entry if it is not referenced by any other snapshot version node. For version node 0
+        // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0
+        // node can be only deleted if the snapshot is also purged.
+        boolean toRemove = localDataGraph.inDegree(versionEntry) == 0
+            && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion())
+            || isSnapshotPurged);
+        if (toRemove) {
+          snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion());
         }
-        snapshotLocalDataProvider.commit();
       }
-      decreamentOrphanCheckCount(snapshotId, countBeforeCheck);
+      snapshotLocalDataProvider.commit();
     }
   }

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
index 7de9deba207c..a5590c4895f3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
@@ -283,7 +283,7 @@ private void mockSnapshotStore(UUID snapshotId, List<LiveFileMetaData> sstFiles)
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 20);
     for (int start = 0; start < snapshotIds.size(); start++) {
       for (int end = start + 1; end < snapshotIds.size(); end++) {
@@ -327,7 +327,7 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testVersionLockResolution(boolean read) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 5);
     for (int snapIdx = 0; snapIdx < snapshotIds.size(); snapIdx++) {
       UUID snapId = snapshotIds.get(snapIdx);
@@ -365,7 +365,7 @@ public void testVersionLockResolution(boolean read) throws IOException {

   @Test
   public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
     UUID snapId = snapshotIds.get(1);
     try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider =
@@ -382,7 +382,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis

   @Test
   public void testAddVersionFromRDB() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
     addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8));
     UUID snapId = snapshotIds.get(1);
@@ -445,7 +445,7 @@ private void validateVersions(OmSnapshotLocalDataManage

   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
     UUID prevSnapId = snapshotIds.get(0);
     UUID snapId = snapshotIds.get(1);
@@ -502,7 +502,7 @@ private void addVersionsToLocalData(OmSnapshotLocalData

   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testVersionResolution(boolean read) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 5);
     List<Map<Integer, Integer>> versionMaps = Arrays.asList(
         ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15),
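Patch 049 above keys the orphan cleanup off graph in-degree: a version node nobody points at is a candidate orphan, with carve-outs for version 0 and for the snapshot's current version. A self-contained illustration of that predicate using Guava's graph API (toy String nodes stand in for LocalDataVersionNode; the carve-outs are noted in comments):

    import com.google.common.graph.GraphBuilder;
    import com.google.common.graph.MutableGraph;

    final class OrphanVersionSketch {
      public static void main(String[] args) {
        MutableGraph<String> graph = GraphBuilder.directed().build();
        // An edge runs from a dependent version to the version it was built from.
        graph.putEdge("snapB-v1", "snapA-v4");
        graph.addNode("snapA-v6");

        // snapA-v4: in-degree 1, still referenced by snapB-v1 -> keep.
        // snapA-v6: in-degree 0 and neither version 0 nor the current version -> removable.
        // snapB-v1: in-degree 0 too, but it is snapB's current version, so the
        //           manager's extra getVersion() check keeps it.
        for (String node : graph.nodes()) {
          System.out.println(node + " in-degree=" + graph.inDegree(node));
        }
      }
    }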
From fab85eaa382dd824a66716a0a87e72677257b6cc Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 15:14:14 -0400
Subject: [PATCH 050/126] HDDS-13785. Fix checkstyle

Change-Id: I5f39975ae4d2631cd6dd5dae83ed73cf6bd52d14
---
 .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 --
 .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java    | 3 ---
 2 files changed, 5 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 9fbaa66d9383..eac346667db8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -43,14 +43,12 @@
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.Scheduler;
-import org.apache.hadoop.hdds.utils.SimpleStriped;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmSnapshotLocalData;
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
index a5590c4895f3..adfe4b0414e6 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
@@ -27,8 +27,6 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.mockStatic;
@@ -728,7 +726,6 @@ public void testInitWithInvalidPathThrowsException() throws IOException {
   @Test
   public void testClose() throws IOException {
     localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
-    // Should not throw exception
     localDataManager.close();
   }
From c73a35519c12765c4b4e28a62f13e677788f9098 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 15:29:52 -0400
Subject: [PATCH 051/126] HDDS-13783. Fix test

Change-Id: I8e5ba34d39de9c8e007d2902a8738c487b01902a
---
 .../org/apache/hadoop/ozone/om/TestOmSnapshotManager.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 7f808df3f978..6ec49935b356 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -774,6 +774,7 @@ public void testCreateSnapshotIdempotent() throws Exception {
     when(bucketTable.get(dbBucketKey)).thenReturn(omBucketInfo);

     SnapshotInfo first = createSnapshotInfo(volumeName, bucketName);
+    first.setPathPreviousSnapshotId(null);
     when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first);

     // Create first checkpoint for the snapshot checkpoint
@@ -797,10 +798,13 @@
   private SnapshotInfo createSnapshotInfo(String volumeName,
       String bucketName) {
-    return SnapshotInfo.newInstance(volumeName,
+    SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName,
         bucketName,
         UUID.randomUUID().toString(),
         UUID.randomUUID(),
         Time.now());
+    snapshotInfo.setPathPreviousSnapshotId(null);
+    snapshotInfo.setGlobalPreviousSnapshotId(null);
+    return snapshotInfo;
   }
 }

From 1d39beeadef77359ac48c19305e5a9234d6d5ee0 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 15:55:48 -0400
Subject: [PATCH 052/126] HDDS-13785. Fix test

Change-Id: I0b33548be47ab3d94a9ad7bed92ad81d16b9ea29
---
 .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 +++-
 .../org/apache/hadoop/ozone/om/TestOmSnapshotManager.java    | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index eac346667db8..7c3bcaa560fd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -236,7 +236,9 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws
   }

   private void increamentOrphanCheckCount(UUID snapshotId) {
-    this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 1 : v + 1);
+    if (snapshotId != null) {
+      this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 1 : (v + 1));
+    }
   }

   private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 6ec49935b356..73f9e2863be3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -330,7 +330,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException {
     assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0));
     assertFalse(localData.getSstFiltered());
     assertEquals(0L, localData.getLastDefragTime());
-    assertFalse(localData.getNeedsDefrag());
+    assertTrue(localData.getNeedsDefrag());
     assertEquals(1, localData.getVersionSstFileInfos().size());

     // Cleanup

From b1a38343c5f2eb98727c2ae468082d8126b4eb42 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 16:09:17 -0400
Subject: [PATCH 053/126] HDDS-13785. Fix test

Change-Id: Id6de8a04ae094bf0069bf6dc6e604fe26712d6ee
---
 .../main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java   | 5 -----
 hadoop-hdds/common/src/main/resources/ozone-default.xml      | 5 +++++
 .../main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java   | 3 +++
 .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++--
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 1b1d7fcc95d5..db66fed22fe9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -640,11 +640,6 @@ public final class OzoneConfigKeys {
       OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT = TimeUnit.DAYS.toMillis(30);

-  public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL =
-      "ozone.om.snapshot.local.data.manager.service.interval";
-
-  public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m";
-
   public static final String OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL =
       "ozone.om.snapshot.compaction.dag.prune.daemon.run.interval";

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 462b1e4331f4..06dc7f84d63a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -4848,4 +4848,9 @@
     <value>10000</value>
    <description>Maximum number of lock objects that could be present in the pool.</description>
  </property>
+  <property>
+    <name>ozone.om.snapshot.local.data.manager.service.interval</name>
+    <value>5m</value>
+    <description>Interval for cleaning up orphan snapshot local data versions corresponding to snapshots</description>
+  </property>
 </configuration>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 254a49ea9a99..d31bc2c9fc3b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -681,6 +681,9 @@ public final class OMConfigKeys {
   public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT =
       "ozone.om.hierarchical.resource.locks.hard.limit";
   public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT = 10000;
+  public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL =
+      "ozone.om.snapshot.local.data.manager.service.interval";
+  public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m";

   /**
    * Never constructed.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 7c3bcaa560fd..e39662d44912 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -17,8 +17,8 @@

 package org.apache.hadoop.ozone.om.snapshot;

-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION;

 import com.google.common.annotations.VisibleForTesting;
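With the key now under OMConfigKeys, the cadence of the orphan cleanup task is operator-tunable. Per the guard added in init() in patch 049, a non-positive interval skips scheduling the task altogether. A sketch of an ozone-site.xml override (the 10m value is illustrative):

    <property>
      <name>ozone.om.snapshot.local.data.manager.service.interval</name>
      <value>10m</value>
      <!-- Set to 0 to disable the orphan snapshot-version cleanup service. -->
    </property>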
From 1986bbefda494bbcce356a9468980528cda890f6 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Thu, 16 Oct 2025 18:28:43 -0400
Subject: [PATCH 054/126] HDDS-13785. Fix conditions

Change-Id: Ic5d26fdaec10470b2dc6c14deacbf185ccbef2bf
---
 .../snapshot/OmSnapshotLocalDataManager.java  | 22 +++++++++--
 .../TestOmSnapshotLocalDataManager.java       | 38 +++++++++----------
 2 files changed, 36 insertions(+), 24 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index e39662d44912..f98672ad71e8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -254,6 +254,10 @@ private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) {
     });
   }

+  Map<UUID, Integer> getSnapshotToBeCheckedForOrphans() {
+    return snapshotToBeCheckedForOrphans;
+  }
+
   private void init(OzoneConfiguration configuration, SnapshotChainManager chainManager) throws IOException {
     this.locks = omMetadataManager.getHierarchicalLockManager();
     this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>();
@@ -696,25 +700,37 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps
     SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId);
     Map<Integer, LocalDataVersionNode> existingVersions = existingSnapVersions == null ? Collections.emptyMap() :
         existingSnapVersions.getSnapshotVersions();
+    Map<Integer, LocalDataVersionNode> newVersions = snapshotVersions.getSnapshotVersions();
     Map<Integer, Set<LocalDataVersionNode>> predecessors = new HashMap<>();
+    boolean versionsRemoved = false;
     // Track all predecessors of the existing versions and remove the node from the graph.
     for (Map.Entry<Integer, LocalDataVersionNode> existingVersion : existingVersions.entrySet()) {
       LocalDataVersionNode existingVersionNode = existingVersion.getValue();
       predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode));
+      versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey());
       localDataGraph.removeNode(existingVersionNode);
     }
     // Add the nodes to be added in the graph and map.
     addSnapshotVersionMeta(snapshotId, snapshotVersions);
     // Reconnect all the predecessors for existing nodes.
-    for (Map.Entry<Integer, LocalDataVersionNode> entry : snapshotVersions.getSnapshotVersions().entrySet()) {
+    for (Map.Entry<Integer, LocalDataVersionNode> entry : newVersions.entrySet()) {
       for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) {
         localDataGraph.putEdge(predecessor, entry.getValue());
       }
     }
-    // The previous snapshotId could have become an orphan entry or could have orphan versions.
     if (existingSnapVersions != null) {
-      increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId());
+      // The previous snapshotId could have become an orphan entry, or could have orphan versions (in case of
+      // version removals).
+      if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(),
+          snapshotVersions.getPreviousSnapshotId())) {
+        increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId());
+      }
+      // If the version was also updated, there could be an orphan version left within the
+      // same snapshot.
+      if (existingSnapVersions.getVersion() != snapshotVersions.getVersion()) {
+        increamentOrphanCheckCount(snapshotId);
+      }
     }
   }

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
index adfe4b0414e6..659e37d753ba 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java
@@ -63,7 +63,6 @@
 import org.apache.hadoop.ozone.om.OmSnapshotLocalData;
 import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml;
 import org.apache.hadoop.ozone.om.OmSnapshotManager;
-import org.apache.hadoop.ozone.om.SnapshotChainManager;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
 import org.apache.hadoop.ozone.om.lock.FlatResource;
 import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager;
@@ -102,9 +101,6 @@ public class TestOmSnapshotLocalDataManager {
   @Mock
   private HierarchicalResourceLockManager lockManager;

-  @Mock
-  private SnapshotChainManager chainManager;
-
   @Mock
   private RDBStore rdbStore;

@@ -281,7 +277,7 @@ private void mockSnapshotStore(UUID snapshotId, List<LiveFileMetaData> sstFiles)
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 20);
     for (int start = 0; start < snapshotIds.size(); start++) {
       for (int end = start + 1; end < snapshotIds.size(); end++) {
@@ -325,7 +321,7 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testVersionLockResolution(boolean read) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 5);
     for (int snapIdx = 0; snapIdx < snapshotIds.size(); snapIdx++) {
       UUID snapId = snapshotIds.get(snapIdx);
@@ -363,7 +359,7 @@ public void testVersionLockResolution(boolean read) throws IOException {
   @Test
   public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
     UUID snapId = snapshotIds.get(1);
     try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider =
@@ -380,7 +376,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis
   @Test
   public void testAddVersionFromRDB() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
     addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8));
     UUID snapId = snapshotIds.get(1);
@@ -443,7 +439,7 @@ private void validateVersions(OmSnapshotLocalDataManage
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 2);
     UUID prevSnapId = snapshotIds.get(0);
     UUID snapId = snapshotIds.get(1);
@@ -500,7 +496,7 @@ private void addVersionsToLocalData(OmSnapshotLocalData
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testVersionResolution(boolean read) throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 5);
     List<Map<Integer, Integer>> versionMaps = Arrays.asList(
         ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15),
@@ -543,7 +539,7 @@ public void testVersionResolution(boolean read) throws IOException {
   @Test
   public void testConstructor() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     assertNotNull(localDataManager);
   }
@@ -552,7 +548,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce
     UUID snapshotId = UUID.randomUUID();
     SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo));
     assertNotNull(yamlPath);
@@ -583,7 +579,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException {
     RocksDatabase rocksDatabase = mock(RocksDatabase.class);
     when(snapshotStore.getDb()).thenReturn(rocksDatabase);
     when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles);
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);

     localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo);
@@ -606,7 +602,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException {
     // Create and write snapshot local data file
     OmSnapshotLocalData localData = createMockLocalData(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);

     // Write the file manually for testing
     Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId()));
@@ -627,7 +623,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept
     // Create local data with wrong snapshot ID
     OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId));
     writeLocalDataToFile(localData, yamlPath);
@@ -643,7 +639,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException {

     OmSnapshotLocalData localData = createMockLocalData(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     Path yamlPath = tempDir.resolve("test-snapshot.yaml");
     writeLocalDataToFile(localData, yamlPath);
@@ -661,7 +657,7 @@ public void testAddVersionNodeWithDependents() throws IOException {
         .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList());
     UUID snapshotId = versionIds.get(0);
     UUID previousSnapshotId = versionIds.get(1);
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     // Create snapshot directory structure and files
     createSnapshotLocalDataFile(snapshotId, previousSnapshotId);
     createSnapshotLocalDataFile(previousSnapshotId, null);
@@ -677,7 +673,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException {

     createSnapshotLocalDataFile(snapshotId, null);

-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);

     OmSnapshotLocalData localData = createMockLocalData(snapshotId, null);
@@ -699,7 +695,7 @@ public void testInitWithExistingYamlFiles() throws IOException {
     createSnapshotLocalDataFile(snapshotId, previousSnapshotId);

     // Initialize - should load existing files
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     assertNotNull(localDataManager);

     Map versionMap =
@@ -719,13 +715,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException {

     // Should throw IOException during init
     assertThrows(IOException.class, () -> {
-      new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+      new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     });
   }

   @Test
   public void testClose() throws IOException {
-    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf);
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
     // Should not throw exception
     localDataManager.close();
   }
Allow version resolution to null Change-Id: Ibe23607830bee6a96812cbb0b541dcac33370be9 --- .../snapshot/OmSnapshotLocalDataManager.java | 28 +++++++++++-------- .../TestOmSnapshotLocalDataManager.java | 13 +++++++-- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 05287286bca3..33bff2bbd484 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -374,19 +374,19 @@ protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOExceptio } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapIdToResolve) throws IOException { - this(snapshotId, true, null, snapIdToResolve); + this(snapshotId, true, null, snapIdToResolve, true); } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock) throws IOException { - this(snapshotId, readLock, null, null); + this(snapshotId, readLock, null, null, false); } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock, CheckedSupplier, IOException> snapshotLocalDataSupplier, - UUID snapshotIdToBeResolved) throws IOException { + UUID snapshotIdToBeResolved, boolean isSnapshotToBeResolvedNullable) throws IOException { this.snapshotId = snapshotId; LockDataProviderInitResult result = initialize(readLock, snapshotId, snapshotIdToBeResolved, - snapshotLocalDataSupplier); + isSnapshotToBeResolvedNullable, snapshotLocalDataSupplier); this.snapshotLocalData = result.getSnapshotLocalData(); this.lock = result.getLock(); this.previousLock = result.getPreviousLock(); @@ -423,9 +423,9 @@ private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) thro * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ - private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUID toResolveSnapshotId, - CheckedSupplier, IOException> snapshotLocalDataSupplier) - throws IOException { + private LockDataProviderInitResult initialize( + boolean readLock, UUID snapId, UUID toResolveSnapshotId, boolean isSnapshotToBeResolvedNullable, + CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { HierarchicalResourceLock snapIdLock = null; HierarchicalResourceLock previousReadLockAcquired = null; try { @@ -445,13 +445,16 @@ private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUI // do while loop since the nodes that need be added may not be present in the graph so it may not be possible // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); - if (previousSnapshotId != null) { + // If isSnapshotToBeResolvedNullable is true or an explicit toResolveSnapshotId was passed in, keep the + // passed-in value (which may be null); otherwise fall back to the previous snapshot id. Resolving to null + // means that, once committed, this snapshot becomes the first snapshot in the chain with no previous + // snapshot id. + toResolveSnapshotId = (isSnapshotToBeResolvedNullable || toResolveSnapshotId != null) ?
toResolveSnapshotId : + ssLocalData.getPreviousSnapshotId(); + if (toResolveSnapshotId != null && previousSnapshotId != null) { if (!versionNodeMap.containsKey(previousSnapshotId)) { throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); } - toResolveSnapshotId = toResolveSnapshotId == null ? ssLocalData.getPreviousSnapshotId() : - toResolveSnapshotId; previousReadLockAcquired = acquireLock(previousSnapshotId, true); // Create a copy of the previous versionMap to get the previous versions corresponding to the previous // snapshot. This map would mutated to resolve the previous snapshot's version corresponding to the @@ -521,6 +524,7 @@ private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUI } } else { toResolveSnapshotId = null; + ssLocalData.setPreviousSnapshotId(null); } return new LockDataProviderInitResult(snapIdLock, ssLocalData, previousReadLockAcquired, toResolveSnapshotId); } catch (IOException e) { @@ -569,13 +573,13 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { - super(snapshotId, false, null, snapshotIdToBeResolved); + super(snapshotId, false, null, snapshotIdToBeResolved, true); fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { - super(snapshotId, false, snapshotLocalDataSupplier, null); + super(snapshotId, false, snapshotLocalDataSupplier, null, false); fullLock.readLock().lock(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index dcea25465949..edcee1f48884 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -267,7 +268,9 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) @ValueSource(booleans = {true, false}) public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); - List snapshotIds = createSnapshotLocalData(localDataManager, 20); + List snapshotIds = new ArrayList<>(); + snapshotIds.add(null); + snapshotIds.addAll(createSnapshotLocalData(localDataManager, 20)); for (int start = 0; start < snapshotIds.size(); start++) { for (int end = start + 1; end < snapshotIds.size(); end++) { UUID startSnapshotId = snapshotIds.get(start); @@ -279,8 +282,14 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept localDataManager.getWritableOmSnapshotLocalData(endSnapshotId, startSnapshotId)) { OmSnapshotLocalData 
snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); OmSnapshotLocalData previousSnapshot = omSnapshotLocalDataProvider.getPreviousSnapshotLocalData(); - assertEquals(startSnapshotId, previousSnapshot.getSnapshotId()); assertEquals(endSnapshotId, snapshotLocalData.getSnapshotId()); + if (startSnapshotId == null) { + assertNull(previousSnapshot); + assertNull(snapshotLocalData.getPreviousSnapshotId()); + continue; + } + assertEquals(startSnapshotId, previousSnapshot.getSnapshotId()); + assertEquals(startSnapshotId, snapshotLocalData.getPreviousSnapshotId()); if (read) { assertEquals(getReadLockMessageAcquire(endSnapshotId), lockCapturor.get(logCaptorIdx++)); } else { From 908c47d04a85659fe14657e4b78008486fc7e1f2 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 23:26:02 -0400 Subject: [PATCH 056/126] HDDS-13785. Fix tests Change-Id: I39d1ba6b8bac77fe0ff442b61afe56efe50c9712 --- .../om/snapshot/filter/AbstractReclaimableFilterTest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java index ef97975ca8ec..3c50e93625f5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java @@ -52,6 +52,7 @@ import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -162,10 +163,11 @@ protected void teardown() throws IOException { } private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { - OMMetadataManager metadataManager = mock(OMMetadataManager.class); + OmMetadataManagerImpl metadataManager = mock(OmMetadataManagerImpl.class); BucketManager bucketManager = mock(BucketManager.class); when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); when(ozoneManager.getBucketManager()).thenReturn(bucketManager); + when(metadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager); long volumeCount = 0; for (String volume : volumes) { when(metadataManager.getVolumeId(eq(volume))).thenReturn(volumeCount); From 278605af16b1b5ce8216d83577c379fcd3795d2f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 13:23:35 -0400 Subject: [PATCH 057/126] HDDS-13783. 
Add dirty bit Change-Id: I165f00132548acf920b8fb9d7530a6314366797d --- .../hadoop/ozone/om/OmSnapshotLocalData.java | 10 +- .../snapshot/OmSnapshotLocalDataManager.java | 102 ++++++++++++------ .../TestOmSnapshotLocalDataManager.java | 14 ++- 3 files changed, 84 insertions(+), 42 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5de83927952c..5d474d371329 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -115,7 +115,7 @@ public boolean getSstFiltered() { * Sets whether SST is filtered for this snapshot. * @param sstFiltered */ - public void setSstFiltered(boolean sstFiltered) { + void setSstFiltered(boolean sstFiltered) { this.isSSTFiltered = sstFiltered; } @@ -131,7 +131,7 @@ public long getLastDefragTime() { * Sets the last defrag time, in epoch milliseconds. * @param lastDefragTime Timestamp of the last defrag */ - public void setLastDefragTime(Long lastDefragTime) { + void setLastDefragTime(Long lastDefragTime) { this.lastDefragTime = lastDefragTime; } @@ -147,7 +147,7 @@ public boolean getNeedsDefrag() { * Sets whether the snapshot needs defrag. * @param needsDefrag true if the snapshot needs defrag, false otherwise */ - public void setNeedsDefrag(boolean needsDefrag) { + void setNeedsDefrag(boolean needsDefrag) { this.needsDefrag = needsDefrag; } @@ -163,7 +163,7 @@ public Map getVersionSstFileInfos() { * Sets the defragged SST file list. * @param versionSstFileInfos Map of version to defragged SST file list */ - public void setVersionSstFileInfos(Map versionSstFileInfos) { + void setVersionSstFileInfos(Map versionSstFileInfos) { this.versionSstFileInfos.clear(); this.versionSstFileInfos.putAll(versionSstFileInfos); } @@ -260,7 +260,7 @@ public int getVersion() { * Sets the version of the snapshot local data. A valid version shall be greater than 0. * @param version version */ - public void setVersion(int version) { + void setVersion(int version) { this.version = version; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 33bff2bbd484..430ba1ef4555 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -328,6 +328,27 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } + private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. 
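+ // Capture the predecessors before removing the node: removing a node from the graph also drops its
+ // incident edges, which could not be recovered afterwards.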
+ for (Map.Entry existingVersion : existingVersions.entrySet()) { + LocalDataVersionNode existingVersionNode = existingVersion.getValue(); + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + localDataGraph.removeNode(existingVersionNode); + } + // Add the nodes to be added in the graph and map. + addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } + /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -567,6 +588,8 @@ public void close() throws IOException { */ public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapshotLocalDataProvider { + private boolean dirty; + private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { super(snapshotId, false); fullLock.readLock().lock(); @@ -586,6 +609,7 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) throws IOException { SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + SnapshotVersionsMeta existingVersionsMeta = getVersionNodeMap().get(snapshotLocalData.getSnapshotId()); for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { validateVersionAddition(node); } @@ -597,28 +621,12 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo validateVersionRemoval(snapshotId, entry.getKey()); } } - return versionsToBeAdded; - } - - private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { - SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); - Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : - existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); - // Track all predecessors of the existing versions and remove the node from the graph. - for (Map.Entry existingVersion : existingVersions.entrySet()) { - LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); - localDataGraph.removeNode(existingVersionNode); - } - // Add the nodes to be added in the graph and map. - addSnapshotVersionMeta(snapshotId, snapshotVersions); - // Reconnect all the predecessors for existing nodes. - for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } + // Set Dirty if the snapshot doesn't exist or previousSnapshotId has changed. 
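+ // The previousSnapshotId is part of the persisted yaml, so a re-parented snapshot must be rewritten to
+ // disk even when no individual version was added or removed.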
+ if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), + existingVersionsMeta.getPreviousSnapshotId())) { + setDirty(); } + return versionsToBeAdded; } public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { @@ -626,24 +634,50 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + // Set Dirty if a version is added. + setDirty(); + } + + public void removeVersion(int version) { + this.getSnapshotLocalData().removeVersionSSTFileInfos(version); + // Set Dirty if a version is removed. + setDirty(); } public synchronized void commit() throws IOException { + // Validate modification and commit the changes. SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); - String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); - String tmpFilePath = filePath + ".tmp"; - File tmpFile = new File(tmpFilePath); - boolean tmpFileExists = tmpFile.exists(); - if (tmpFileExists) { - tmpFileExists = !tmpFile.delete(); - } - if (tmpFileExists) { - throw new IOException("Unable to delete tmp file " + tmpFilePath); + // Need to update the disk state if and only if the dirty bit is set. + if (isDirty()) { + String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); + String tmpFilePath = filePath + ".tmp"; + File tmpFile = new File(tmpFilePath); + boolean tmpFileExists = tmpFile.exists(); + if (tmpFileExists) { + tmpFileExists = !tmpFile.delete(); + } + if (tmpFileExists) { + throw new IOException("Unable to delete tmp file " + tmpFilePath); + } + snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); + Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + upsertNode(super.snapshotId, localDataVersionNodes); + // Reset dirty bit + resetDirty(); } - snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); - Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, - StandardCopyOption.REPLACE_EXISTING); - upsertNode(super.snapshotId, localDataVersionNodes); + } + + private void setDirty() { + dirty = true; + } + + private void resetDirty() { + dirty = false; + } + + private boolean isDirty() { + return dirty; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index edcee1f48884..e63f73e295d6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -49,6 +49,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -76,6 +77,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import 
org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -87,6 +89,7 @@ /** * Test class for OmSnapshotLocalDataManager. */ +@Timeout(value = 30, unit = TimeUnit.SECONDS) public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; @@ -449,7 +452,7 @@ public void testWriteVersionValidation(boolean nextVersionExisting) throws IOExc if (nextVersionExisting) { try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) { - prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4); + prevSnap.removeVersion(4); IOException ex = assertThrows(IOException.class, prevSnap::commit); assertTrue(ex.getMessage().contains("Cannot remove Snapshot " + prevSnapId + " with version : 4 since it " + "still has predecessors")); @@ -458,12 +461,12 @@ public void testWriteVersionValidation(boolean nextVersionExisting) throws IOExc validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4)); } else { try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapId)) { - snap.getSnapshotLocalData().removeVersionSSTFileInfos(5); + snap.removeVersion(5); snap.commit(); } try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) { - prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4); + prevSnap.removeVersion(4); prevSnap.commit(); } validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0)); @@ -481,6 +484,11 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData snapshotLocalData.addVersionSSTFileInfos(ImmutableList.of(createMockLiveFileMetaData("file" + version + ".sst", KEY_TABLE, "key1", "key2")), version.getValue()); } + mockSnapshotStore(snapId, ImmutableList.of(createMockLiveFileMetaData("file" + + snapshotLocalData.getVersion() + 1 + ".sst", KEY_TABLE, "key1", "key2"))); + snap.addSnapshotVersion(snapshotStore); + snap.removeVersion(snapshotLocalData.getVersion()); + snapshotLocalData.setVersion(snapshotLocalData.getVersion() - 1); snap.commit(); } try (ReadableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getOmSnapshotLocalData(snapId)) { From ac4719b311d0ee4bb7b81bca512bfb4c39b75252 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 13:47:51 -0400 Subject: [PATCH 058/126] Merge Change-Id: Ie7ac6a6cad96577e53a99b38a57e54ed888f0333 --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5d474d371329..cadeabe75458 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -147,7 +147,7 @@ public boolean getNeedsDefrag() { * Sets whether the snapshot needs defrag. * @param needsDefrag true if the snapshot needs defrag, false otherwise */ - void setNeedsDefrag(boolean needsDefrag) { + public void setNeedsDefrag(boolean needsDefrag) { this.needsDefrag = needsDefrag; } From cf19dce44465d132709d0c76d76f8624ee13d645 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 14:00:05 -0400 Subject: [PATCH 059/126] HDDS-13783. 
Address review comments Change-Id: I3df543d896463f24ba3b69fce1b2f655af612dc6 --- .../hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index e63f73e295d6..0be19dfb36ee 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -370,7 +370,6 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis "key1", "key2")), 3); IOException ex = assertThrows(IOException.class, omSnapshotLocalDataProvider::commit); - System.out.println(ex.getMessage()); assertTrue(ex.getMessage().contains("since previous snapshot with version hasn't been loaded")); } } From 34097de7a535eb050d0378a672f1ff5660c3574c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 14:08:21 -0400 Subject: [PATCH 060/126] HDDS-13783. Address review comments Change-Id: I94cf4b82b2b620f480e2d1e01e6d94a6679d974e --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 430ba1ef4555..d9316cbcb2b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -215,6 +215,11 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(prevSnapId)); OmSnapshotLocalData prevSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + if (!prevSnapId.equals(prevSnapshotLocalData.getSnapshotId())) { + throw new IOException("SnapshotId mismatch: expected " + prevSnapId + + " but found " + prevSnapshotLocalData.getSnapshotId() + + " in file " + previousSnapshotLocalDataFile.getAbsolutePath()); + } stack.push(Pair.of(prevSnapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(prevSnapshotLocalData))); } visitedSnapshotIds.add(snapId); @@ -441,7 +446,7 @@ private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) thro } /** - * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the + * Initializes the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ private LockDataProviderInitResult initialize( From c46ddc276a34a236bc5bc3607f19805cbe7e699c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 14:10:37 -0400 Subject: [PATCH 061/126] HDDS-13783.
Address review comments Change-Id: I661e61e04031c1bcd537024e0a0859a6d6aeaffd --- .../org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5d474d371329..fb9dbe1c49e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -115,7 +115,7 @@ public boolean getSstFiltered() { * Sets whether SST is filtered for this snapshot. * @param sstFiltered */ - void setSstFiltered(boolean sstFiltered) { + public void setSstFiltered(boolean sstFiltered) { this.isSSTFiltered = sstFiltered; } @@ -131,7 +131,7 @@ public long getLastDefragTime() { * Sets the last defrag time, in epoch milliseconds. * @param lastDefragTime Timestamp of the last defrag */ - void setLastDefragTime(Long lastDefragTime) { + public void setLastDefragTime(Long lastDefragTime) { this.lastDefragTime = lastDefragTime; } @@ -147,7 +147,7 @@ public boolean getNeedsDefrag() { * Sets whether the snapshot needs defrag. * @param needsDefrag true if the snapshot needs defrag, false otherwise */ - void setNeedsDefrag(boolean needsDefrag) { + public void setNeedsDefrag(boolean needsDefrag) { this.needsDefrag = needsDefrag; } From 99afc0294cbdf922f1719cc36650f27db745bd67 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 15:08:10 -0400 Subject: [PATCH 062/126] HDDS-13783. Address review comments Change-Id: I59cab67d93f0359bca54c0c8119f5018167b7d1a --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index fb9dbe1c49e9..1c840a1cd2e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -260,7 +260,7 @@ public int getVersion() { * Sets the version of the snapshot local data. A valid version shall be greater than 0. * @param version version */ - void setVersion(int version) { + public void setVersion(int version) { this.version = version; } From 48ec0bb1f1d34a90b86aeb67aab9dd623ed4f23e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 18 Oct 2025 15:07:19 -0400 Subject: [PATCH 063/126] HDDS-13810. 
Fix Build Issue because of unused dependency Change-Id: I950b4d0cc45a74369b8efa36deaf947db4cb35bc --- hadoop-hdds/server-scm/pom.xml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 811d439f7dd8..d7a1f9dc19c9 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -38,11 +38,6 @@ com.fasterxml.jackson.core jackson-core - - com.fasterxml.jackson.core - jackson-databind - - com.google.guava guava @@ -71,11 +66,6 @@ org.apache.commons commons-collections4 - - org.apache.commons - commons-compress - - org.apache.commons commons-lang3 @@ -88,11 +78,6 @@ org.apache.hadoop hadoop-common - - org.apache.ozone - hdds-client - - org.apache.ozone hdds-common @@ -170,6 +155,16 @@ hdds-docs provided + + com.fasterxml.jackson.core + jackson-databind + test + + + org.apache.commons + commons-compress + test + @@ -194,6 +189,11 @@ hadoop-hdfs-client test + + org.apache.ozone + hdds-client + test + org.apache.ozone hdds-common From cb31b7c6ae6e7a840d020a0a14afc0657e2fe801 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 19 Oct 2025 08:32:32 -0400 Subject: [PATCH 064/126] Revert "HDDS-13810. Fix Build Issue because of unused dependency" This reverts commit 48ec0bb1f1d34a90b86aeb67aab9dd623ed4f23e. --- hadoop-hdds/server-scm/pom.xml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index d7a1f9dc19c9..811d439f7dd8 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -38,6 +38,11 @@ com.fasterxml.jackson.core jackson-core + + com.fasterxml.jackson.core + jackson-databind + + com.google.guava guava @@ -66,6 +71,11 @@ org.apache.commons commons-collections4 + + org.apache.commons + commons-compress + + org.apache.commons commons-lang3 @@ -78,6 +88,11 @@ org.apache.hadoop hadoop-common + + org.apache.ozone + hdds-client + + org.apache.ozone hdds-common @@ -155,16 +170,6 @@ hdds-docs provided - - com.fasterxml.jackson.core - jackson-databind - test - - - org.apache.commons - commons-compress - test - @@ -189,11 +194,6 @@ hadoop-hdfs-client test - - org.apache.ozone - hdds-client - test - org.apache.ozone hdds-common From ff90af89830d6abc72763e21a516d2c78505f730 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 19 Oct 2025 22:57:33 -0400 Subject: [PATCH 065/126] HDDS-13785. 
Add unit tests Change-Id: I21de33cbea63edda0bd21503cb2516ea1d1f1647 --- .../snapshot/OmSnapshotLocalDataManager.java | 6 +- .../TestOmSnapshotLocalDataManager.java | 79 ++++++++++++++++++- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 98260feea71d..71f274b39c84 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -333,7 +333,7 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) || isSnapshotPurged); if (toRemove) { - snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion()); + snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); } } snapshotLocalDataProvider.commit(); @@ -438,7 +438,7 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps // Track all predecessors of the existing versions and remove the node from the graph. for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + predecessors.put(existingVersion.getKey(), new HashSet<>(localDataGraph.predecessors(existingVersionNode))); versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey()); localDataGraph.removeNode(existingVersionNode); } @@ -787,7 +787,7 @@ public synchronized void commit() throws IOException { } else if (snapshotLocalDataFile.exists()) { LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}", super.snapshotId, snapshotLocalDataFile.getAbsolutePath()); - if (snapshotLocalDataFile.delete()) { + if (!snapshotLocalDataFile.delete()) { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 51d491c23f90..bfb6a83cc500 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -18,11 +18,13 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static 
org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -38,6 +40,7 @@ import com.google.common.collect.ImmutableMap; import java.io.File; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; @@ -98,6 +101,7 @@ public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; private static List lockCapturor; private static OzoneConfiguration conf; + private static Map purgedSnapshotIdMap; @Mock private OMMetadataManager omMetadataManager; @@ -137,6 +141,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } }; lockCapturor = new ArrayList<>(); + purgedSnapshotIdMap = new HashMap<>(); } @AfterAll @@ -163,7 +168,10 @@ public void setUp() throws IOException { when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); this.snapshotUtilMock = mockStatic(SnapshotUtils.class); - snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())).thenReturn(false); + purgedSnapshotIdMap.clear(); + snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())) + .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); + conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, -1); } @AfterEach @@ -421,6 +429,75 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage } } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testOrphanVersionDeletionWithVersionDeletion(boolean purgeSnapshot) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + List snapshotIds = createSnapshotLocalData(localDataManager, 3); + UUID firstSnapId = snapshotIds.get(0); + UUID secondSnapId = snapshotIds.get(1); + UUID thirdSnapId = snapshotIds.get(2); + + addVersionsToLocalData(localDataManager, firstSnapId, ImmutableMap.of(1, 1, 2, 2, 3, 3)); + addVersionsToLocalData(localDataManager, secondSnapId, ImmutableMap.of(4, 2, 8, 1, 10, 3, 11, 3)); + addVersionsToLocalData(localDataManager, thirdSnapId, ImmutableMap.of(5, 8, 13, 10)); + assertEquals(new HashSet<>(snapshotIds), localDataManager.getSnapshotToBeCheckedForOrphans().keySet()); + localDataManager.getSnapshotToBeCheckedForOrphans().clear(); + purgedSnapshotIdMap.put(secondSnapId, purgeSnapshot); + localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, thirdSnapId); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(thirdSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(Sets.newHashSet(0, 13), snapshotLocalData.getVersionSstFileInfos().keySet()); + } + assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId)); + localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(secondSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + if (purgeSnapshot) { + assertEquals(Sets.newHashSet(0, 10), snapshotLocalData.getVersionSstFileInfos().keySet()); + } else { + assertEquals(Sets.newHashSet(0, 10, 11), snapshotLocalData.getVersionSstFileInfos().keySet()); + } + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void 
testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + List snapshotIds = createSnapshotLocalData(localDataManager, 3); + UUID firstSnapId = snapshotIds.get(0); + UUID secondSnapId = snapshotIds.get(1); + UUID thirdSnapId = snapshotIds.get(2); + + addVersionsToLocalData(localDataManager, firstSnapId, ImmutableMap.of(1, 1, 2, 2, 3, 3)); + addVersionsToLocalData(localDataManager, secondSnapId, ImmutableMap.of(4, 2, 8, 1, 10, 3, 11, 3)); + addVersionsToLocalData(localDataManager, thirdSnapId, ImmutableMap.of(5, 8, 13, 10)); + purgedSnapshotIdMap.put(secondSnapId, purgeSnapshot); + try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = + localDataManager.getWritableOmSnapshotLocalData(thirdSnapId, firstSnapId)) { + snapshotLocalDataProvider.commit(); + } + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(thirdSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(Sets.newHashSet(0, 5, 13), snapshotLocalData.getVersionSstFileInfos().keySet()); + assertEquals(firstSnapId, snapshotLocalData.getPreviousSnapshotId()); + } + + assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId)); + localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId); + if (purgeSnapshot) { + NoSuchFileException e = assertThrows(NoSuchFileException.class, + () -> localDataManager.getOmSnapshotLocalData(secondSnapId)); + assertFalse(localDataManager.getVersionNodeMap().containsKey(secondSnapId)); + } else { + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(secondSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(Sets.newHashSet(0, 11), snapshotLocalData.getVersionSstFileInfos().keySet()); + } + } + } + /** * Validates write-time version propagation and removal rules when the previous * snapshot already has a concrete version recorded. From 8b014dd981c3a0f4996acb9f52cd5b748c34159f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 19 Oct 2025 23:12:13 -0400 Subject: [PATCH 066/126] HDDS-13783. Add case for commit key in middle of chain Change-Id: I37d2f5c07f405f3069a4cb99881d5d1e67110e79 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index d9316cbcb2b7..86029241a05b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -341,7 +341,7 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps // Track all predecessors of the existing versions and remove the node from the graph. 
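+ // The set returned by predecessors() is a live view backed by the graph, so it must be copied
+ // before removeNode() below invalidates it.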
for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + predecessors.put(existingVersion.getKey(), new HashSet<>(localDataGraph.predecessors(existingVersionNode))); localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 0be19dfb36ee..1a444d688ef0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -440,11 +440,13 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage @ValueSource(booleans = {true, false}) public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); - List snapshotIds = createSnapshotLocalData(localDataManager, 2); + List snapshotIds = createSnapshotLocalData(localDataManager, 3); UUID prevSnapId = snapshotIds.get(0); UUID snapId = snapshotIds.get(1); + UUID nextSnapId = snapshotIds.get(2); addVersionsToLocalData(localDataManager, prevSnapId, ImmutableMap.of(4, 1)); addVersionsToLocalData(localDataManager, snapId, ImmutableMap.of(5, 4)); + addVersionsToLocalData(localDataManager, nextSnapId, ImmutableMap.of(6, 0)); validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0, 5)); validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4)); From 57662c651945592230c1dc7f83d7f29d481dee0f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 20 Oct 2025 07:42:27 -0400 Subject: [PATCH 067/126] HDDS-13783. Convert set to list of predecessors Change-Id: I119e8cede140c755d3cd09c0a56234ff4906be98 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 13 ++++++++----- .../om/snapshot/TestOmSnapshotLocalDataManager.java | 10 ++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 86029241a05b..74a4c89cfd11 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -28,6 +28,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; @@ -337,18 +338,20 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); + Map> predecessors = new HashMap<>(); // Track all predecessors of the existing versions and remove the node from the graph. 
for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), new HashSet<>(localDataGraph.predecessors(existingVersionNode))); + // Copy the predecessors: the collection returned by predecessors() is a live, mutable view of the + // graph, and it changes as nodes are removed from the graph. + predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); // Reconnect all the predecessors for existing nodes. for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) { localDataGraph.putEdge(predecessor, entry.getValue()); } } @@ -514,11 +517,11 @@ private LockDataProviderInitResult initialize( Set versionNode = localDataGraph.successors(entry.getValue()); if (versionNode.size() > 1) { throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", - currentIteratedSnapshotId, entry.getValue(), versionNode)); + currentIteratedSnapshotId, entry.getValue().getVersion(), versionNode)); } if (versionNode.isEmpty()) { throw new IOException(String.format("Snapshot %s version %d doesn't have successor", - currentIteratedSnapshotId, entry.getValue())); + currentIteratedSnapshotId, entry.getValue().getVersion())); } // Set the version node for iterated version to the successor corresponding to the previous snapshot id. entry.setValue(versionNode.iterator().next()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 1a444d688ef0..947c1a4b7f47 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -472,6 +472,16 @@ public void testWriteVersionValidation(boolean nextVersionExisting) throws IOExc } validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0)); validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0)); + // Check next snapshot is able to resolve to previous snapshot. + try (ReadableOmSnapshotLocalDataProvider nextSnap = localDataManager.getOmSnapshotLocalData(nextSnapId, + prevSnapId)) { + OmSnapshotLocalData snapshotLocalData = nextSnap.getSnapshotLocalData(); + assertEquals(prevSnapId, snapshotLocalData.getPreviousSnapshotId()); + snapshotLocalData.getVersionSstFileInfos() + .forEach((version, versionMeta) -> { + assertEquals(0, versionMeta.getPreviousSnapshotVersion()); + }); + } } } From 5f0bb916662a9e0f168e5b4cd23b74e5be740fc1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 23 Oct 2025 21:22:21 -0400 Subject: [PATCH 068/126] HDDS-13833.
Add transactionInfo field in SnapshotLocalData and update the value on SnapshotPurgeRequest Change-Id: I0e9de1954fc65a907b9f4a98d6f84b2772d15551 --- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../hadoop/ozone/om/OmSnapshotLocalData.java | 17 ++++++++++- .../ozone/om/OmSnapshotLocalDataYaml.java | 8 +++++- .../snapshot/OMSnapshotPurgeRequest.java | 6 ++-- .../snapshot/OMSnapshotPurgeResponse.java | 27 ++++++++++++++---- .../snapshot/OmSnapshotLocalDataManager.java | 10 ++++++- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 28 ++++++++++++++----- ...TestOMSnapshotPurgeRequestAndResponse.java | 18 ++++++++++-- .../TestOmSnapshotLocalDataManager.java | 23 ++++++++++++++- 9 files changed, 118 insertions(+), 20 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index cb4490c2c1db..aecbdfae615d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -221,6 +221,7 @@ public final class OzoneConsts { public static final String OM_SST_FILE_INFO_START_KEY = "startKey"; public static final String OM_SST_FILE_INFO_END_KEY = "endKey"; public static final String OM_SST_FILE_INFO_COL_FAMILY = "columnFamily"; + public static final String OM_SLD_TXN_INFO = "transactionInfo"; // YAML fields for .container files public static final String CONTAINER_ID = "containerID"; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 1c840a1cd2e9..02e07914b311 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -30,6 +30,7 @@ import java.util.UUID; import java.util.stream.Collectors; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.ozone.util.WithChecksum; import org.apache.ozone.compaction.log.SstFileInfo; @@ -63,6 +64,9 @@ public class OmSnapshotLocalData implements WithChecksum { // Previous snapshotId based on which the snapshot local data is built. private UUID previousSnapshotId; + // Stores the transactionInfo corresponding to the OM when the snapshot is purged. + private TransactionInfo transactionInfo; + // Map of version to VersionMeta, using linkedHashMap since the order of the map needs to be deterministic for // checksum computation. private final LinkedHashMap versionSstFileInfos; @@ -73,7 +77,8 @@ public class OmSnapshotLocalData implements WithChecksum { /** * Creates a OmSnapshotLocalData object with default values.
*/ - public OmSnapshotLocalData(UUID snapshotId, List notDefraggedSSTFileList, UUID previousSnapshotId) { + public OmSnapshotLocalData(UUID snapshotId, List notDefraggedSSTFileList, UUID previousSnapshotId, + TransactionInfo transactionInfo) { this.snapshotId = snapshotId; this.isSSTFiltered = false; this.lastDefragTime = 0L; @@ -83,6 +88,7 @@ public OmSnapshotLocalData(UUID snapshotId, List notDefraggedS new VersionMeta(0, notDefraggedSSTFileList.stream().map(SstFileInfo::new).collect(Collectors.toList()))); this.version = 0; this.previousSnapshotId = previousSnapshotId; + this.transactionInfo = transactionInfo; setChecksumTo0ByteArray(); } @@ -101,6 +107,15 @@ public OmSnapshotLocalData(OmSnapshotLocalData source) { this.previousSnapshotId = source.previousSnapshotId; this.versionSstFileInfos = new LinkedHashMap<>(); setVersionSstFileInfos(source.versionSstFileInfos); + this.transactionInfo = source.transactionInfo; + } + + public TransactionInfo getTransactionInfo() { + return transactionInfo; + } + + public void setTransactionInfo(TransactionInfo transactionInfo) { + this.transactionInfo = transactionInfo; } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index c376e9a332c0..344d7305db43 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -24,6 +24,7 @@ import org.apache.commons.pool2.BasePooledObjectFactory; import org.apache.commons.pool2.PooledObject; import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.ozone.compaction.log.SstFileInfo; @@ -71,6 +72,8 @@ private static class OmSnapshotLocalDataRepresenter extends Representer { this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG); representers.put(SstFileInfo.class, new RepresentSstFileInfo()); representers.put(VersionMeta.class, new RepresentVersionMeta()); + representers.put(TransactionInfo.class, data -> new ScalarNode(Tag.STR, data.toString(), null, null, + DumperOptions.ScalarStyle.PLAIN)); representers.put(UUID.class, data -> new ScalarNode(Tag.STR, data.toString(), null, null, DumperOptions.ScalarStyle.PLAIN)); } @@ -168,7 +171,10 @@ public Object construct(Node node) { UUID snapId = UUID.fromString(snapIdStr); final String prevSnapIdStr = (String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID); UUID prevSnapId = prevSnapIdStr != null ? UUID.fromString(prevSnapIdStr) : null; - OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); + final String purgeTxInfoStr = (String) nodes.get(OzoneConsts.OM_SLD_TXN_INFO); + TransactionInfo transactionInfo = purgeTxInfoStr != null ? 
TransactionInfo.valueOf(purgeTxInfoStr) : null; + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId, + transactionInfo); // Set version from YAML Integer version = (Integer) nodes.get(OzoneConsts.OM_SLD_VERSION); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 5524371bf1e2..a1a1d306c238 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -91,6 +91,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); + TransactionInfo transactionInfo = TransactionInfo.valueOf(context.getTermIndex()); try { // Each snapshot purge operation does three things: @@ -123,12 +124,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut } // Update the snapshotInfo lastTransactionInfo. for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); + snapshotInfo.setLastTransactionInfo(transactionInfo.toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), CacheValue.get(context.getIndex(), snapshotInfo)); } - omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); + omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos, + transactionInfo); omSnapshotIntMetrics.incNumSnapshotPurges(); LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating snapshots:{}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 3797b3fcf2eb..8a370cb975e5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -36,6 +37,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,15 +51,18 @@ public class OMSnapshotPurgeResponse extends OMClientResponse { LoggerFactory.getLogger(OMSnapshotPurgeResponse.class); private final List snapshotDbKeys; private final Map updatedSnapInfos; + private 
final TransactionInfo transactionInfo; public OMSnapshotPurgeResponse( @Nonnull OMResponse omResponse, @Nonnull List snapshotDbKeys, - Map updatedSnapInfos + Map updatedSnapInfos, + TransactionInfo transactionInfo ) { super(omResponse); this.snapshotDbKeys = snapshotDbKeys; this.updatedSnapInfos = updatedSnapInfos; + this.transactionInfo = transactionInfo; } /** @@ -69,6 +74,7 @@ public OMSnapshotPurgeResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); this.snapshotDbKeys = null; this.updatedSnapInfos = null; + this.transactionInfo = null; } @Override @@ -96,10 +102,14 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, // Remove the snapshot from snapshotId to snapshotTableKey map. ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); - // Delete Snapshot checkpoint directory. + OmSnapshotLocalDataManager snapshotLocalDataManager = ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager(); - deleteCheckpointDirectory(snapshotLocalDataManager, omMetadataManager, snapshotInfo); + // Update snapshot local data with the purge transaction info. This is used to check whether the + // snapshot purge transaction has been flushed to RocksDB. + updateLocalData(snapshotLocalDataManager, snapshotInfo); + // Delete Snapshot checkpoint directory. + deleteCheckpointDirectory(omMetadataManager, snapshotInfo); // Delete snapshotInfo from the table. omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } @@ -115,11 +125,18 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager, } } + private void updateLocalData(OmSnapshotLocalDataManager localDataManager, SnapshotInfo snapshotInfo) + throws IOException { + try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapshotInfo)) { + snap.setTransactionInfo(this.transactionInfo); + snap.commit(); + } + } + /** * Deletes the checkpoint directory for a snapshot. */ - private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager, - OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { + private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { // Acquiring write lock to avoid race condition with sst filtering service which creates a sst filtered file // inside the snapshot directory. Any operation apart which doesn't create/delete files under this snapshot // directory can run in parallel along with this operation.
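A minimal sketch of how a consumer might use the purge marker written by updateLocalData() above: the snapshot's yaml is only safe to reclaim once the recorded transaction is durable. OmSnapshotManager.isTransactionFlushedToDisk is an assumption here (it is called unqualified in the isSnapshotPurged change from HDDS-13785 later in this series; if it lives elsewhere, substitute the correct owner); the other accessors are the ones added in this patch.

import java.io.IOException;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshotLocalData;
import org.apache.hadoop.ozone.om.OmSnapshotManager;

// Illustrative only: a purged snapshot's local data should be reclaimed only
// after the purge transaction recorded by updateLocalData() is durable.
final class PurgeReclaimSketch {
  private PurgeReclaimSketch() {
  }

  static boolean canReclaimLocalData(OMMetadataManager omMetadataManager, OmSnapshotLocalData localData)
      throws IOException {
    TransactionInfo purgeTxn = localData.getTransactionInfo();
    if (purgeTxn == null) {
      // No purge has been recorded in the yaml yet, so the snapshot is still live.
      return false;
    }
    // Until the purge transaction is flushed from the memtable/WAL to disk, a
    // crash could replay the purge and still expect the local data on disk.
    return OmSnapshotManager.isTransactionFlushedToDisk(omMetadataManager, purgeTxn);
  }
}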
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 74a4c89cfd11..09fdbee0af0b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -44,6 +44,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -136,7 +137,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId(), + null), null))) { snapshotLocalData.commit(); } @@ -652,6 +654,12 @@ public void removeVersion(int version) { setDirty(); } + public void setTransactionInfo(TransactionInfo transactionInfo) { + this.getSnapshotLocalData().setTransactionInfo(transactionInfo); + // Set Dirty when the transactionInfo is set. + setDirty(); + } + public synchronized void commit() throws IOException { // Validate modification and commit the changes. SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index b234014ebbc0..2f8b7be9a195 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -37,11 +37,13 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.util.ObjectSerializer; @@ -106,7 +108,8 @@ private LiveFileMetaData createLiveFileMetaData(String fileName, String table, S /** * Creates a snapshot local data YAML file. 
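+ * When a non-null transactionInfo is passed, it is written into the yaml so tests can assert it round-trips.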
*/ - private Pair writeToYaml(UUID snapshotId, String snapshotName) throws IOException { + private Pair writeToYaml(UUID snapshotId, String snapshotName, TransactionInfo transactionInfo) + throws IOException { String yamlFilePath = snapshotName + ".yaml"; UUID previousSnapshotId = UUID.randomUUID(); // Create snapshot data with not defragged SST files @@ -115,7 +118,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, - previousSnapshotId); + previousSnapshotId, transactionInfo); // Set version dataYaml.setVersion(42); @@ -150,7 +153,9 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw @Test public void testWriteToYaml() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1"); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1", transactionInfo); File yamlFile = yamlFilePrevIdPair.getLeft(); UUID prevSnapId = yamlFilePrevIdPair.getRight(); @@ -160,6 +165,7 @@ public void testWriteToYaml() throws IOException { // Verify fields assertEquals(44, snapshotData.getVersion()); assertTrue(snapshotData.getSstFiltered()); + assertEquals(transactionInfo, snapshotData.getTransactionInfo()); VersionMeta notDefraggedSSTFiles = snapshotData.getVersionSstFileInfos().get(0); assertEquals(new VersionMeta(0, @@ -192,17 +198,19 @@ public void testWriteToYaml() throws IOException { @Test public void testUpdateSnapshotDataFile() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2", null); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalData dataYaml = omSnapshotLocalDataSerializer.load(yamlFile); - + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); // Update snapshot data dataYaml.setSstFiltered(false); dataYaml.setNeedsDefrag(false); dataYaml.addVersionSSTFileInfos( singletonList(createLiveFileMetaData("defragged-sst4", "table3", "k5", "k6")), 5); + dataYaml.setTransactionInfo(transactionInfo); // Write updated data back to file omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); @@ -213,6 +221,7 @@ public void testUpdateSnapshotDataFile() throws IOException { // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); assertThat(dataYaml.getNeedsDefrag()).isFalse(); + assertEquals(transactionInfo, dataYaml.getTransactionInfo()); Map defraggedFiles = dataYaml.getVersionSstFileInfos(); assertEquals(4, defraggedFiles.size()); @@ -234,7 +243,9 @@ public void testEmptyFile() throws IOException { @Test public void testChecksum() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3", transactionInfo); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalData snapshotData = 
omSnapshotLocalDataSerializer.load(yamlFile); @@ -251,7 +262,9 @@ public void testChecksum() throws IOException { @Test public void testYamlContainsAllFields() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4"); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4", transactionInfo); File yamlFile = yamlFilePrevIdPair.getLeft(); String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); @@ -264,5 +277,6 @@ public void testYamlContainsAllFields() throws IOException { assertThat(content).contains(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO); assertThat(content).contains(OzoneConsts.OM_SLD_SNAP_ID); assertThat(content).contains(OzoneConsts.OM_SLD_PREV_SNAP_ID); + assertThat(content).contains(OzoneConsts.OM_SLD_TXN_INFO); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 35053882eeda..b78975ef0816 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -52,6 +52,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -159,6 +160,10 @@ public void testValidateAndUpdateCache() throws Exception { List snapshotDbKeysToPurge = createSnapshots(10); assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + List snapshotInfos = new ArrayList<>(); + for (String snapshotKey : snapshotDbKeysToPurge) { + snapshotInfos.add(getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey)); + } // Check if all the checkpoints are created. 
for (Path checkpoint : checkpointPaths) { @@ -171,9 +176,9 @@ public void testValidateAndUpdateCache() throws Exception { snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); - + TransactionInfo transactionInfo = TransactionInfo.valueOf(TransactionInfo.getTermIndex(200L)); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), transactionInfo.getTransactionIndex()); for (String snapshotTableKey: snapshotDbKeysToPurge) { assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey)); @@ -191,6 +196,15 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } + OmSnapshotLocalDataManager snapshotLocalDataManager = + getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager(); + for (SnapshotInfo snapshotInfo : snapshotInfos) { + try (ReadableOmSnapshotLocalDataProvider snapProvider = + snapshotLocalDataManager.getOmSnapshotLocalData(snapshotInfo)) { + assertEquals(transactionInfo, snapProvider.getSnapshotLocalData().getTransactionInfo()); + } + } + assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges()); assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 947c1a4b7f47..bfaa48c04feb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -49,6 +49,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -56,6 +57,7 @@ import org.apache.commons.compress.utils.Sets; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; @@ -374,6 +376,25 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis } } + @Test + public void testUpdateTransactionInfo() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + UUID snapshotId = createSnapshotLocalData(localDataManager, 1).get(0); + try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertNull(snapshotLocalData.getTransactionInfo()); + snap.setTransactionInfo(transactionInfo); + snap.commit(); + } + + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = 
snap.getSnapshotLocalData(); + assertEquals(transactionInfo, snapshotLocalData.getTransactionInfo()); + } + } + @Test public void testAddVersionFromRDB() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); @@ -774,7 +795,7 @@ private OmSnapshotLocalData createMockLocalData(UUID snapshotId, UUID previousSn sstFiles.add(createMockLiveFileMetaData("file2.sst", "columnFamily1", "key3", "key10")); sstFiles.add(createMockLiveFileMetaData("file3.sst", "columnFamily2", "key1", "key8")); sstFiles.add(createMockLiveFileMetaData("file4.sst", "columnFamily2", "key0", "key10")); - return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId); + return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId, null); } private void createSnapshotLocalDataFile(UUID snapshotId, UUID previousSnapshotId) From 5b55a59757e87a55ad441445c5a702c279c80ccb Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 23 Oct 2025 21:59:27 -0400 Subject: [PATCH 069/126] HDDS-13785. Merge with HDDS-13833 Change-Id: I20189e6a47d6fec15f57f1770ee75010a1bc6edb --- .../apache/hadoop/ozone/om/OmSnapshotManager.java | 10 ++++++++++ .../om/snapshot/OmSnapshotLocalDataManager.java | 14 +++++++++----- .../hadoop/ozone/om/snapshot/SnapshotUtils.java | 11 ----------- .../snapshot/TestOmSnapshotLocalDataManager.java | 4 ++-- 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index ad3a820c2c95..3e7c0c08702c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -345,6 +345,16 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { } } + public static boolean isSnapshotPurged(SnapshotChainManager chainManager, OMMetadataManager omMetadataManager, + UUID snapshotId, TransactionInfo transactionInfo) throws IOException { + String tableKey = chainManager.getTableKey(snapshotId); + if (tableKey == null) { + return true; + } + return !omMetadataManager.getSnapshotInfoTable().isExist(tableKey) && transactionInfo != null && + isTransactionFlushedToDisk(omMetadataManager, transactionInfo); + } + /** * Help reject OM startup if snapshot feature is disabled * but there are snapshots remaining in this OM. 
Note: snapshots that are diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index a3e6be683721..1c2eeb3a0f58 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -325,7 +326,8 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider( snapshotId)) { OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); - boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId); + boolean isSnapshotPurged = OmSnapshotManager.isSnapshotPurged(chainManager, metadataManager, snapshotId, + snapshotLocalData.getTransactionInfo()); for (Map.Entry integerLocalDataVersionNodeEntry : getVersionNodeMap() .get(snapshotId).getSnapshotVersions().entrySet()) { LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue(); @@ -431,7 +432,8 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } - private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, + boolean transactionInfoSet) throws IOException { SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); @@ -463,9 +465,11 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps snapshotVersions.getPreviousSnapshotId())) { increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); } - // If the version is also updated it could mean that there could be some orphan version present within the + // If the transactionInfo is set, the snapshot has been purged and the entire yaml file could have + // become an orphan. If the version is also updated, + // there could be some orphan version present within the // same snapshot.
- if (existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { + if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { increamentOrphanCheckCount(snapshotId); } } @@ -802,7 +806,7 @@ public synchronized void commit() throws IOException { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); } } - upsertNode(super.snapshotId, localDataVersionNodes); + upsertNode(super.snapshotId, localDataVersionNodes, getSnapshotLocalData().getTransactionInfo() != null); // Reset dirty bit resetDirty(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index f6d9d7ae17e2..63e7e38d518f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -34,8 +34,6 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; -import org.apache.hadoop.hdds.utils.db.CodecException; -import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -91,15 +89,6 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } - public static boolean isSnapshotPurged(SnapshotChainManager chainManager, OMMetadataManager omMetadataManager, - UUID snapshotId) throws RocksDatabaseException, CodecException { - String tableKey = chainManager.getTableKey(snapshotId); - if (tableKey == null) { - return true; - } - return !omMetadataManager.getSnapshotInfoTable().isExist(tableKey); - } - public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, SnapshotChainManager chainManager, UUID snapshotId) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 2b029a3a27b4..d4233591be34 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -171,7 +171,7 @@ public void setUp() throws IOException { when(rdbStore.getDbLocation()).thenReturn(dbLocation); this.snapshotUtilMock = mockStatic(SnapshotUtils.class); purgedSnapshotIdMap.clear(); - snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())) + snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, -1); } @@ -397,7 +397,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis @Test public void testUpdateTransactionInfo() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); UUID snapshotId 
= createSnapshotLocalData(localDataManager, 1).get(0); From cc35056c3b9b5548a752ed07bd372e04bb805cab Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 24 Oct 2025 20:38:06 -0400 Subject: [PATCH 070/126] HDDS-13783. Make local data graph synchronous Change-Id: I2f7cb5ec772d2d2de99e8d3cba8bdc34e3f36efd --- .../snapshot/OmSnapshotLocalDataManager.java | 125 ++++++++++-------- 1 file changed, 72 insertions(+), 53 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 74a4c89cfd11..99461bb8cab6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -40,6 +40,7 @@ import java.util.Set; import java.util.Stack; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; @@ -78,6 +79,8 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final OMMetadataManager omMetadataManager; // Used for acquiring locks on the entire data structure. private final ReadWriteLock fullLock; + // Used for taking a lock on the internal Map and Graph data structures to ensure thread safety. + private final ReadWriteLock internalLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. private HierarchicalResourceLockManager locks; @@ -92,8 +95,9 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO data.computeAndSetChecksum(yaml); } }; - this.versionNodeMap = new HashMap<>(); + this.versionNodeMap = new ConcurrentHashMap<>(); this.fullLock = new ReentrantReadWriteLock(); + this.internalLock = new ReentrantReadWriteLock(); init(); } @@ -334,29 +338,6 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } - private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { - SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); - Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : - existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); - // Track all predecessors of the existing versions and remove the node from the graph. - for (Map.Entry existingVersion : existingVersions.entrySet()) { - LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the - // nodes in the graph would change. - predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); - localDataGraph.removeNode(existingVersionNode); - } - // Add the nodes to be added in the graph and map. - addSnapshotVersionMeta(snapshotId, snapshotVersions); - // Reconnect all the predecessors for existing nodes.
- for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } - } - } - /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -480,11 +461,11 @@ private LockDataProviderInitResult initialize( toResolveSnapshotId = (isSnapshotToBeResolvedNullable || toResolveSnapshotId != null) ? toResolveSnapshotId : ssLocalData.getPreviousSnapshotId(); if (toResolveSnapshotId != null && previousSnapshotId != null) { + previousReadLockAcquired = acquireLock(previousSnapshotId, true); if (!versionNodeMap.containsKey(previousSnapshotId)) { throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); } - previousReadLockAcquired = acquireLock(previousSnapshotId, true); // Create a copy of the previous versionMap to get the previous versions corresponding to the previous // snapshot. This map would mutated to resolve the previous snapshot's version corresponding to the // toResolveSnapshotId by iterating through the chain of previous snapshot ids. @@ -509,22 +490,27 @@ private LockDataProviderInitResult initialize( } UUID previousId = previousIds.iterator().next(); HierarchicalResourceLock previousToPreviousReadLockAcquired = acquireLock(previousId, true); - try { // Get the version node for the snapshot and update the version node to the successor to point to the // previous node. for (Map.Entry entry : previousVersionNodeMap.entrySet()) { - Set versionNode = localDataGraph.successors(entry.getValue()); - if (versionNode.size() > 1) { - throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", - currentIteratedSnapshotId, entry.getValue().getVersion(), versionNode)); - } - if (versionNode.isEmpty()) { - throw new IOException(String.format("Snapshot %s version %d doesn't have successor", - currentIteratedSnapshotId, entry.getValue().getVersion())); + internalLock.readLock().lock(); + try { + Set versionNode = localDataGraph.successors(entry.getValue()); + if (versionNode.size() > 1) { + throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", + currentIteratedSnapshotId, entry.getValue().getVersion(), versionNode)); + } + if (versionNode.isEmpty()) { + throw new IOException(String.format("Snapshot %s version %d doesn't have successor", + currentIteratedSnapshotId, entry.getValue().getVersion())); + } + // Set the version node for iterated version to the successor corresponding to the previous snapshot + // id. + entry.setValue(versionNode.iterator().next()); + } finally { + internalLock.readLock().unlock(); } - // Set the version node for iterated version to the successor corresponding to the previous snapshot id. - entry.setValue(versionNode.iterator().next()); } } finally { // Release the read lock acquired on the previous snapshot id acquired. 
Now that the instance @@ -616,25 +602,30 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) throws IOException { - SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); - SnapshotVersionsMeta existingVersionsMeta = getVersionNodeMap().get(snapshotLocalData.getSnapshotId()); - for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { - validateVersionAddition(node); - } - UUID snapshotId = snapshotLocalData.getSnapshotId(); - Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? - getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); - for (Map.Entry entry : existingVersions.entrySet()) { - if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { - validateVersionRemoval(snapshotId, entry.getKey()); + internalLock.readLock().lock(); + try { + SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + SnapshotVersionsMeta existingVersionsMeta = getVersionNodeMap().get(snapshotLocalData.getSnapshotId()); + for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { + validateVersionAddition(node); } + UUID snapshotId = snapshotLocalData.getSnapshotId(); + Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? + getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); + for (Map.Entry entry : existingVersions.entrySet()) { + if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { + validateVersionRemoval(snapshotId, entry.getKey()); + } + } + // Set Dirty if the snapshot doesn't exist or previousSnapshotId has changed. + if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), + existingVersionsMeta.getPreviousSnapshotId())) { + setDirty(); + } + return versionsToBeAdded; + } finally { + internalLock.readLock().unlock(); } - // Set Dirty if the snapshot doesn't exist or previousSnapshotId has changed. - if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), - existingVersionsMeta.getPreviousSnapshotId())) { - setDirty(); - } - return versionsToBeAdded; } public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { @@ -676,6 +667,34 @@ public synchronized void commit() throws IOException { } } + private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + internalLock.writeLock().lock(); + try { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. + for (Map.Entry existingVersion : existingVersions.entrySet()) { + LocalDataVersionNode existingVersionNode = existingVersion.getValue(); + // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the + // nodes in the graph would change. + predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); + localDataGraph.removeNode(existingVersionNode); + } + // Add the nodes to be added in the graph and map. 
+ addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } finally { + internalLock.writeLock().unlock(); + } + } + private void setDirty() { dirty = true; } From 3f59895032771c884ef1f7f58367cfd0d470cbf9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 24 Oct 2025 20:51:49 -0400 Subject: [PATCH 071/126] HDDS-13785. Use internal lock on orphan block cleanup Change-Id: I743bd8877d90037b9892f75d344c773604362eb0 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 86a41e4fb967..f94ccf350208 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -336,12 +336,18 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai // remove the version entry if it is not referenced by any other snapshot version node. For version node 0 // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0 // node can be only deleted if the snapshot is also purged. - boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 - && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) - || isSnapshotPurged); - if (toRemove) { - snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); + internalLock.readLock().lock(); + try { + boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 + && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) + || isSnapshotPurged); + if (toRemove) { + snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); + } + } finally { + internalLock.readLock().unlock(); } + } snapshotLocalDataProvider.commit(); } From 616bef3682fa1fd12a0e3a550421476cbed2ac96 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 27 Oct 2025 14:01:27 -0400 Subject: [PATCH 072/126] HDDS-13783. 
Fix NPE with ConcurrentHashMap Change-Id: I960fd1d25e0f323d56f5e88ffb03261d777d9021 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 99461bb8cab6..bfe74822476d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -179,7 +179,7 @@ public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws } private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { - if (!versionNodeMap.containsKey(snapshotId)) { + if (snapshotId == null || !versionNodeMap.containsKey(snapshotId)) { return null; } return versionNodeMap.get(snapshotId).getVersionNode(version); @@ -190,7 +190,7 @@ private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh if (!versionNodeMap.containsKey(snapshotId)) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { validateVersionAddition(versionNode); - LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : + LocalDataVersionNode previousVersionNode = getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); localDataGraph.addNode(versionNode); if (previousVersionNode != null) { From b0023d11132696b9f560c88d8376652be2615a78 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 28 Oct 2025 06:27:34 -0400 Subject: [PATCH 073/126] HDDS-13830.
Snapshot Rocks DB directory path computation based on local version of snapshot Change-Id: Ib4af980b466bda88b19c5793c1c666bc66895a30 --- .../hadoop/ozone/client/OzoneSnapshot.java | 2 +- .../ozone/client/TestOzoneSnapshot.java | 4 +- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 39 +++-------- .../ozone/om/helpers/TestOmSnapshotInfo.java | 1 - .../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 2 +- .../hadoop/ozone/freon/TestOMSnapshotDAG.java | 2 +- .../ozone/om/TestOMDbCheckpointServlet.java | 2 +- ...stOMDbCheckpointServletInodeBasedXfer.java | 2 +- .../hadoop/ozone/om/TestOMRatisSnapshots.java | 6 +- .../ozone/om/snapshot/TestOmSnapshot.java | 2 +- .../om/snapshot/TestOmSnapshotFileSystem.java | 2 +- .../snapshot/TestOzoneManagerHASnapshot.java | 2 +- .../snapshot/TestOzoneManagerSnapshotAcl.java | 2 +- .../om/snapshot/TestOzoneSnapshotRestore.java | 2 +- .../TestSnapshotBackgroundServices.java | 2 +- .../ozone/om/OMDBCheckpointServlet.java | 15 ++-- .../OMDBCheckpointServletInodeBasedXfer.java | 20 +++--- .../hadoop/ozone/om/OmSnapshotManager.java | 32 ++++----- .../ozone/om/SnapshotDefragService.java | 12 +++- .../hadoop/ozone/om/SstFilteringService.java | 5 +- .../snapshot/OMSnapshotPurgeResponse.java | 22 +++--- .../snapshot/OmSnapshotLocalDataManager.java | 69 +++++++++++++++---- .../ozone/om/TestOmSnapshotManager.java | 2 +- .../TestOMSnapshotCreateResponse.java | 2 +- .../TestOMSnapshotDeleteResponse.java | 2 +- .../TestOmSnapshotLocalDataManager.java | 4 +- .../ozone/om/snapshot/TestSnapshotChain.java | 1 - .../om/snapshot/TestSnapshotDiffManager.java | 3 - .../ozone/om/snapshot/TestSnapshotInfo.java | 1 - .../TestSnapshotRequestAndResponse.java | 2 +- .../om/snapshot/TestSstFilteringService.java | 6 +- 31 files changed, 149 insertions(+), 121 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index b7bf7051caeb..95f05a50e064 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -192,7 +192,7 @@ public static OzoneSnapshot fromSnapshotInfo(SnapshotInfo snapshotInfo) { snapshotInfo.getSnapshotStatus(), snapshotInfo.getSnapshotId(), snapshotInfo.getSnapshotPath(), - snapshotInfo.getCheckpointDir(), + snapshotInfo.getCheckpointDirName(0), snapshotInfo.getReferencedSize(), snapshotInfo.getReferencedReplicatedSize(), snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning(), diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 8980e28b59b4..16cf58ab5a2c 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client; +import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.when; @@ -40,7 +41,6 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { when(snapshotInfo.getSnapshotStatus()).thenReturn(SNAPSHOT_ACTIVE); 
when(snapshotInfo.getSnapshotId()).thenReturn(snapshotId); when(snapshotInfo.getSnapshotPath()).thenReturn("volume/bucket"); - when(snapshotInfo.getCheckpointDir()).thenReturn("checkpointDir"); when(snapshotInfo.getReferencedSize()).thenReturn(1000L); when(snapshotInfo.getReferencedReplicatedSize()).thenReturn(3000L); when(snapshotInfo.getExclusiveSize()).thenReturn(4000L); @@ -57,7 +57,7 @@ public void testOzoneSnapshotFromSnapshotInfo() { OzoneSnapshot ozoneSnapshot = OzoneSnapshot.fromSnapshotInfo(snapshotInfo); OzoneSnapshot expectedOzoneSnapshot = new OzoneSnapshot( "volume", "bucket", "snap", 1000L, SNAPSHOT_ACTIVE, snapshotId, - "volume/bucket", "checkpointDir", 1000L, 3000L, 6000L, 18000L); + "volume/bucket", OM_SNAPSHOT_SEPARATOR + snapshotId, 1000L, 3000L, 6000L, 18000L); assertEquals(expectedOzoneSnapshot, ozoneSnapshot); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index cbc3709ea1e8..a26422cd81fb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -71,7 +71,6 @@ public final class SnapshotInfo implements Auditable, CopyObject { private UUID pathPreviousSnapshotId; private UUID globalPreviousSnapshotId; private String snapshotPath; // snapshot mask - private String checkpointDir; /** * RocksDB's transaction sequence number at the time of checkpoint creation. */ @@ -99,7 +98,6 @@ private SnapshotInfo(Builder b) { this.pathPreviousSnapshotId = b.pathPreviousSnapshotId; this.globalPreviousSnapshotId = b.globalPreviousSnapshotId; this.snapshotPath = b.snapshotPath; - this.checkpointDir = b.checkpointDir; this.dbTxSequenceNumber = b.dbTxSequenceNumber; this.deepClean = b.deepClean; this.sstFiltered = b.sstFiltered; @@ -150,10 +148,6 @@ public void setSnapshotPath(String snapshotPath) { this.snapshotPath = snapshotPath; } - public void setCheckpointDir(String checkpointDir) { - this.checkpointDir = checkpointDir; - } - public boolean isDeepCleaned() { return deepClean; } @@ -202,10 +196,6 @@ public String getSnapshotPath() { return snapshotPath; } - public String getCheckpointDir() { - return checkpointDir; - } - public boolean isSstFiltered() { return sstFiltered; } @@ -231,7 +221,6 @@ public SnapshotInfo.Builder toBuilder() { .setPathPreviousSnapshotId(pathPreviousSnapshotId) .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) .setSnapshotPath(snapshotPath) - .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean) .setSstFiltered(sstFiltered) @@ -260,7 +249,6 @@ public static class Builder { private UUID pathPreviousSnapshotId; private UUID globalPreviousSnapshotId; private String snapshotPath; - private String checkpointDir; private long dbTxSequenceNumber; private boolean deepClean; private boolean sstFiltered; @@ -339,12 +327,6 @@ public Builder setSnapshotPath(String snapshotPath) { return this; } - /** @param checkpointDir - Snapshot checkpoint directory. */ - public Builder setCheckpointDir(String checkpointDir) { - this.checkpointDir = checkpointDir; - return this; - } - /** @param dbTxSequenceNumber - RDB latest transaction sequence number. 
*/ public Builder setDbTxSequenceNumber(long dbTxSequenceNumber) { this.dbTxSequenceNumber = dbTxSequenceNumber; @@ -459,7 +441,6 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { } sib.setSnapshotPath(snapshotPath) - .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean); return sib.build(); @@ -544,7 +525,6 @@ public static SnapshotInfo getFromProtobuf( } osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) - .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); return osib.build(); @@ -562,17 +542,20 @@ public Map toAuditMap() { /** * Get the name of the checkpoint directory. */ - public static String getCheckpointDirName(UUID snapshotId) { + public static String getCheckpointDirName(UUID snapshotId, int version) { Objects.requireNonNull(snapshotId, "SnapshotId is needed to create checkpoint directory"); - return OM_SNAPSHOT_SEPARATOR + snapshotId; + if (version == 0) { + return OM_SNAPSHOT_SEPARATOR + snapshotId; + } + return OM_SNAPSHOT_SEPARATOR + snapshotId + OM_SNAPSHOT_SEPARATOR + version; } /** * Get the name of the checkpoint directory, (non-static). */ - public String getCheckpointDirName() { - return getCheckpointDirName(getSnapshotId()); + public String getCheckpointDirName(int version) { + return getCheckpointDirName(getSnapshotId(), version); } public long getDbTxSequenceNumber() { @@ -703,10 +686,6 @@ public static SnapshotInfo newInstance(String volumeName, .setBucketName(bucketName) .setDeepClean(false) .setDeepCleanedDeletedDir(false); - - if (snapshotId != null) { - builder.setCheckpointDir(getCheckpointDirName(snapshotId)); - } return builder.build(); } @@ -729,7 +708,6 @@ public boolean equals(Object o) { Objects.equals( globalPreviousSnapshotId, that.globalPreviousSnapshotId) && snapshotPath.equals(that.snapshotPath) && - checkpointDir.equals(that.checkpointDir) && deepClean == that.deepClean && sstFiltered == that.sstFiltered && referencedSize == that.referencedSize && @@ -746,7 +724,7 @@ public int hashCode() { return Objects.hash(snapshotId, name, volumeName, bucketName, snapshotStatus, creationTime, deletionTime, pathPreviousSnapshotId, - globalPreviousSnapshotId, snapshotPath, checkpointDir, + globalPreviousSnapshotId, snapshotPath, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir, lastTransactionInfo, createTransactionInfo); @@ -773,7 +751,6 @@ public String toString() { ", pathPreviousSnapshotId: '" + pathPreviousSnapshotId + '\'' + ", globalPreviousSnapshotId: '" + globalPreviousSnapshotId + '\'' + ", snapshotPath: '" + snapshotPath + '\'' + - ", checkpointDir: '" + checkpointDir + '\'' + ", dbTxSequenceNumber: '" + dbTxSequenceNumber + '\'' + ", deepClean: '" + deepClean + '\'' + ", sstFiltered: '" + sstFiltered + '\'' + diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 98cc035b3c07..7750b32e2e0a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -60,7 +60,6 @@ private SnapshotInfo createSnapshotInfo() { .setPathPreviousSnapshotId(PATH_PREVIOUS_SNAPSHOT_ID) .setGlobalPreviousSnapshotId(GLOBAL_PREVIOUS_SNAPSHOT_ID) 
.setSnapshotPath(SNAPSHOT_PATH) - .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) .setDeepClean(false) .setSstFiltered(false) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index d02319a4cab6..6a97796af32b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -547,7 +547,7 @@ private String createSnapshot() throws Exception { SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(VOLUME, BUCKET, snapshotName)); - String snapshotDirName = getSnapshotPath(conf, snapshotInfo) + + String snapshotDirName = getSnapshotPath(conf, snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, 100000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index 5429dc0f4a12..9f69ed51b7ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -130,7 +130,7 @@ public static void shutdown() { } private String getDBCheckpointAbsolutePath(SnapshotInfo snapshotInfo) { - return OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); + return OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, 0); } private static String getSnapshotDBKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index 3d542785e113..d0b38116d5fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -733,7 +733,7 @@ private String createSnapshot(String vname, String bname) writeClient.createSnapshot(vname, bname, snapshotName); SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(vname, bname, snapshotName)); - String snapshotPath = getSnapshotPath(conf, snapshotInfo) + String snapshotPath = getSnapshotPath(conf, snapshotInfo, 0) + OM_KEY_PREFIX; GenericTestUtils.waitFor(() -> new File(snapshotPath).exists(), 100, 30000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java index 0f5c8bae4b46..5ab71373dd4d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java @@ -228,7 +228,7 @@ public void write(int b) throws IOException { .thenReturn(lock); doCallRealMethod().when(omDbCheckpointServletMock).getCheckpoint(any(), anyBoolean()); 
assertNull(doCallRealMethod().when(omDbCheckpointServletMock).getBootstrapTempData()); - doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirs(any()); + doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirs(any(), any(), any()); doCallRealMethod().when(omDbCheckpointServletMock). processMetadataSnapshotRequest(any(), any(), anyBoolean(), anyBoolean()); doCallRealMethod().when(omDbCheckpointServletMock).writeDbDataToStream(any(), any(), any(), any()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index a1de8fc377a0..3609703c7ef6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -337,11 +337,11 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, File followerMetaDir = OMStorage.getOmDbDir(followerOM.getConfiguration()); Path followerActiveDir = Paths.get(followerMetaDir.toString(), OM_DB_NAME); Path followerSnapshotDir = - Paths.get(getSnapshotPath(followerOM.getConfiguration(), snapshotInfo)); + Paths.get(getSnapshotPath(followerOM.getConfiguration(), snapshotInfo, 0)); File leaderMetaDir = OMStorage.getOmDbDir(leaderOM.getConfiguration()); Path leaderActiveDir = Paths.get(leaderMetaDir.toString(), OM_DB_NAME); Path leaderSnapshotDir = - Paths.get(getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo)); + Paths.get(getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo, 0)); // Get list of live files on the leader. RocksDB activeRocksDB = ((RDBStore) leaderOM.getMetadataManager().getStore()) @@ -1056,7 +1056,7 @@ private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) .get(tableKey); // Allow the snapshot to be written to disk String fileName = - getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); + getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils .waitForCheckpointDirectoryExist(snapshotDir)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 93dba945d46d..19b237fe2600 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -1990,7 +1990,7 @@ private String createSnapshot(String volName, String buckName, .get(SnapshotInfo.getTableKey(volName, linkedBuckets.getOrDefault(buckName, buckName), snapshotName)); String snapshotDirName = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), - snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; + snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils .waitFor(() -> new File(snapshotDirName).exists(), 1000, 120000); return snapshotKeyPrefix; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index fca8b137b720..964513702a08 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -709,7 +709,7 @@ private String createSnapshot(String snapshotName) SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(snapshot.getVolumeName(), snapshot.getBucketName(), snapshotName)); - String snapshotDirName = getSnapshotPath(conf, snapshotInfo) + + String snapshotDirName = getSnapshotPath(conf, snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, 120000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index bae852ae3368..b6008ab3d2e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -410,7 +410,7 @@ private void createSnapshot(String volName, String buckName, String snapName) th String tableKey = SnapshotInfo.getTableKey(volName, buckName, snapName); SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(cluster.getOMLeader(), tableKey); - String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo); + String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { throw new IOException("Snapshot directory doesn't exist"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index f735ad15d295..455f1430d997 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -685,7 +685,7 @@ private void createSnapshot() .get(SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName)); // Allow the snapshot to be written to disk String fileName = - getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo); + getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils .waitForCheckpointDirectoryExist(snapshotDir)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index 6c67554d7b8d..b2fde1f01960 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -159,7 +159,7 @@ private String createSnapshot(String volName, String buckName, .getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volName, buckName, snapshotName)); String snapshotDirName = OmSnapshotManager - 
.getSnapshotPath(clientConf, snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; + .getSnapshotPath(clientConf, snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, 120000); return snapshotKeyPrefix; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index a67a4599beee..eacde483d2ac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -633,7 +633,7 @@ private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) thr .getSnapshotInfoTable() .get(tableKey); // Allow the snapshot to be written to disk - String fileName = getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); + String fileName = getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { throw new IOException("snapshot directory doesn't exist"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index 4d85e9f07472..efe9fc0aeea9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -69,6 +69,7 @@ import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; @@ -347,7 +348,8 @@ private Set getSnapshotDirs(DBCheckpoint checkpoint, boolean waitForDir) OzoneConfiguration conf = getConf(); Set snapshotPaths = new HashSet<>(); - + OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); + OmSnapshotLocalDataManager snapshotLocalDataManager = om.getOmSnapshotManager().getSnapshotLocalDataManager(); // get snapshotInfo entries OmMetadataManagerImpl checkpointMetadataManager = OmMetadataManagerImpl.createCheckpointMetadataManager( @@ -359,11 +361,14 @@ private Set getSnapshotDirs(DBCheckpoint checkpoint, boolean waitForDir) // For each entry, wait for corresponding directory. 
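// A note on the loop below (an illustrative sketch, with names taken from this series, not an
// additional change): the snapshot directory lookup is now version-aware. The defrag version
// recorded in the snapshot's local-data YAML selects the on-disk checkpoint directory, and the
// meta provider holds a read lock on that local data for the lifetime of the lookup, e.g.:
//
//   try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider meta =
//       snapshotLocalDataManager.getOmSnapshotLocalDataMeta(entry.getValue())) {
//     int version = meta.getMeta().getVersion();  // 0 means never defragged
//     Path path = Paths.get(getSnapshotPath(conf, entry.getValue(), version));
//   }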
while (iterator.hasNext()) { Table.KeyValue entry = iterator.next(); - Path path = Paths.get(getSnapshotPath(conf, entry.getValue())); - if (waitForDir) { - waitForDirToExist(path); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapMetaProvider = + snapshotLocalDataManager.getOmSnapshotLocalDataMeta(entry.getValue())) { + Path path = Paths.get(getSnapshotPath(conf, entry.getValue(), snapMetaProvider.getMeta().getVersion())); + if (waitForDir) { + waitForDirToExist(path); + } + snapshotPaths.add(path); } - snapshotPaths.add(path); } } finally { checkpointMetadataManager.stop(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 27e7f1c2d6d6..88e018ace4d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize; @@ -69,7 +70,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -210,6 +210,7 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina DBCheckpoint checkpoint = null; OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); OMMetadataManager omMetadataManager = om.getMetadataManager(); + OmSnapshotLocalDataManager snapshotLocalDataManager = om.getOmSnapshotManager().getSnapshotLocalDataManager(); boolean includeSnapshotData = includeSnapshotData(request); AtomicLong maxTotalSstSize = new AtomicLong(getConf().getLong(OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY, OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT)); @@ -219,7 +220,7 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina if (!includeSnapshotData) { maxTotalSstSize.set(Long.MAX_VALUE); } else { - snapshotPaths = getSnapshotDirs(omMetadataManager); + snapshotPaths = getSnapshotDirs(omMetadataManager, omMetadataManager, snapshotLocalDataManager); } if (sstFilesToExclude.isEmpty()) { @@ -382,18 +383,21 @@ private OzoneConfiguration getConf() { /** * Collects paths to all snapshot databases. 
* - * @param omMetadataManager OMMetadataManager instance + * @param activeOMMetadataManager OMMetadataManager instance * @return Set of paths to snapshot databases * @throws IOException if an I/O error occurs */ - Set getSnapshotDirs(OMMetadataManager omMetadataManager) throws IOException { + Set getSnapshotDirs(OMMetadataManager activeOMMetadataManager, OMMetadataManager omMetadataManager, + OmSnapshotLocalDataManager localDataManager) throws IOException { Set snapshotPaths = new HashSet<>(); SnapshotChainManager snapshotChainManager = new SnapshotChainManager(omMetadataManager); for (SnapshotChainInfo snapInfo : snapshotChainManager.getGlobalSnapshotChain().values()) { - String snapshotDir = - OmSnapshotManager.getSnapshotPath(getConf(), SnapshotInfo.getCheckpointDirName(snapInfo.getSnapshotId())); - Path path = Paths.get(snapshotDir); - snapshotPaths.add(path); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapLocalMeta = + localDataManager.getOmSnapshotLocalDataMeta(snapInfo.getSnapshotId())) { + Path snapshotDir = getSnapshotPath(activeOMMetadataManager, + snapInfo.getSnapshotId(), snapLocalMeta.getMeta().getVersion()); + snapshotPaths.add(snapshotDir); + } } return snapshotPaths; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 7b9beb80cf6f..0954b029ab67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -415,8 +415,12 @@ public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { "' with txnId : '" + TransactionInfo.fromByteString(snapshotInfo.getCreateTransactionInfo()) + "' has not been flushed yet. 
Please wait a few more seconds before retrying", TIMEOUT); } - snapshotMetadataManager = new OmMetadataManagerImpl(conf, - snapshotInfo.getCheckpointDirName(), maxOpenSstFilesInSnapshotDb); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapshotLocalDataProvider = + snapshotLocalDataManager.getOmSnapshotLocalDataMeta(snapshotInfo)) { + snapshotMetadataManager = new OmMetadataManagerImpl(conf, + snapshotInfo.getCheckpointDirName(snapshotLocalDataProvider.getMeta().getVersion()), + maxOpenSstFilesInSnapshotDb); + } } catch (IOException e) { LOG.error("Failed to retrieve snapshot: {}", snapshotTableKey, e); throw e; @@ -505,14 +509,12 @@ public static DBCheckpoint createOmSnapshotCheckpoint( boolean snapshotDirExist = false; // Create DB checkpoint for snapshot - String checkpointPrefix = store.getDbLocation().getName(); - Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); + Path snapshotDirPath = getSnapshotPath(omMetadataManager, snapshotInfo, 0); if (Files.exists(snapshotDirPath)) { snapshotDirExist = true; dbCheckpoint = new RocksDBCheckpoint(snapshotDirPath); } else { - dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName()); + dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName(0)); } OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); @@ -796,27 +798,23 @@ public static String getSnapshotPrefix(String snapshotName) { snapshotName + OM_KEY_PREFIX; } - public static Path getSnapshotPath(OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - return Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); + public static Path getSnapshotPath(OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo, int version) { + return getSnapshotPath(omMetadataManager, snapshotInfo.getSnapshotId(), version); } - public static Path getSnapshotPath(OMMetadataManager omMetadataManager, UUID snapshotId) { + public static Path getSnapshotPath(OMMetadataManager omMetadataManager, UUID snapshotId, int version) { RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); return Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotId)); + checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotId, version)); } public static String getSnapshotPath(OzoneConfiguration conf, - SnapshotInfo snapshotInfo) { - return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName()); + SnapshotInfo snapshotInfo, int version) { + return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName(version)); } - public static String getSnapshotPath(OzoneConfiguration conf, - String checkpointDirName) { + private static String getSnapshotPath(OzoneConfiguration conf, String checkpointDirName) { return OMStorage.getOmDbDir(conf) + OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + OM_DB_NAME + checkpointDirName; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 87f6ff55bb71..0bc0a6cd0abf 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.Iterator; import java.util.Optional; @@ -79,6 +80,7 @@ public class SnapshotDefragService extends BackgroundService private final AtomicBoolean running; private final MultiSnapshotLocks snapshotIdLocks; + private final OzoneConfiguration conf; private final BootstrapStateHandler.Lock lock = new BootstrapStateHandler.Lock(); @@ -90,6 +92,7 @@ public SnapshotDefragService(long interval, TimeUnit unit, long serviceTimeout, this.snapshotLimitPerTask = configuration .getLong(SNAPSHOT_DEFRAG_LIMIT_PER_TASK, SNAPSHOT_DEFRAG_LIMIT_PER_TASK_DEFAULT); + this.conf = configuration; snapshotsDefraggedCount = new AtomicLong(0); running = new AtomicBoolean(false); IOzoneManagerLock omLock = ozoneManager.getMetadataManager().getLock(); @@ -128,11 +131,14 @@ private boolean isRocksToolsNativeLibAvailable() { * Checks if a snapshot needs defragmentation by examining its YAML metadata. */ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { - String snapshotPath = OmSnapshotManager.getSnapshotPath( - ozoneManager.getConfiguration(), snapshotInfo); - + if (!SstFilteringService.isSstFiltered(conf, snapshotInfo)) { + return false; + } try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { + Path snapshotPath = OmSnapshotManager.getSnapshotPath( + ozoneManager.getMetadataManager(), snapshotInfo, + readableOmSnapshotLocalDataProvider.getSnapshotLocalData().getVersion()); // Read snapshot local metadata from YAML OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index 522ea7df6de5..4b5002eb6c4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -87,7 +87,7 @@ public class SstFilteringService extends BackgroundService public static boolean isSstFiltered(OzoneConfiguration ozoneConfiguration, SnapshotInfo snapshotInfo) { Path sstFilteredFile = Paths.get(OmSnapshotManager.getSnapshotPath(ozoneConfiguration, - snapshotInfo), SST_FILTERED_FILE); + snapshotInfo, 0), SST_FILTERED_FILE); return snapshotInfo.isSstFiltered() || sstFilteredFile.toFile().exists(); } @@ -138,7 +138,8 @@ private void markSSTFilteredFlagForSnapshot(SnapshotInfo snapshotInfo) throws IO .acquireReadLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { - String snapshotDir = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo); + // Ensure snapshot is sstFiltered before defrag. + String snapshotDir = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo, 0); try { // mark the snapshot as filtered by creating a file. 
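// Worth spelling out for the SstFilteringService hunks above (an illustrative sketch, not part
// of the change itself): the SST-filtered marker is always resolved against version 0 of the
// snapshot directory, since filtering runs on the original checkpoint before any defrag pass
// produces a higher version:
//
//   Path marker = Paths.get(
//       OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, 0), SST_FILTERED_FILE);
//   boolean filtered = snapshotInfo.isSstFiltered() || marker.toFile().exists();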
if (Files.exists(Paths.get(snapshotDir))) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 3797b3fcf2eb..407da697b1da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -119,7 +119,7 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager, * Deletes the checkpoint directory for a snapshot. */ private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager, - OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { + OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) throws IOException { // Acquiring the write lock to avoid a race condition with the SST filtering service, which creates an SST // filtered file inside the snapshot directory. Any other operation that doesn't create/delete files under this // snapshot directory can run in parallel with this operation. @@ -127,14 +127,18 @@ private void deleteCheckpointDirectory(OmSnapshotLocalD .acquireWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { - Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); - try { - FileUtils.deleteDirectory(snapshotDirPath.toFile()); - } catch (IOException ex) { - LOG.error("Failed to delete snapshot directory {} for snapshot {}", - snapshotDirPath, snapshotInfo.getTableKey(), ex); - } finally { - omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapMetaProvider = + snapshotLocalDataManager.getOmSnapshotLocalDataMeta(snapshotInfo)) { + Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo, + snapMetaProvider.getMeta().getVersion()); + try { + FileUtils.deleteDirectory(snapshotDirPath.toFile()); + } catch (IOException ex) { + LOG.error("Failed to delete snapshot directory {} for snapshot {}", + snapshotDirPath, snapshotInfo.getTableKey(), ex); + } finally { + omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); + } } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index bfe74822476d..54dfc0477827 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -127,7 +127,7 @@ public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) { } public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { - Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId); + Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0); return getSnapshotLocalPropertyYamlPath(snapshotPath); } @@ -146,6 +146,14 @@ public void
createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf } } + public ReadableOmSnapshotLocalDataMetaProvider getOmSnapshotLocalDataMeta(SnapshotInfo snapInfo) throws IOException { + return getOmSnapshotLocalDataMeta(snapInfo.getSnapshotId()); + } + + public ReadableOmSnapshotLocalDataMetaProvider getOmSnapshotLocalDataMeta(UUID snapshotId) throws IOException { + return new ReadableOmSnapshotLocalDataMetaProvider(snapshotId); + } + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { return getOmSnapshotLocalData(snapshotInfo.getSnapshotId()); } @@ -307,6 +315,15 @@ public void close() { } } + private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) throws IOException { + HierarchicalResourceLock acquiredLock = readLock ? locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, + snapId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapId.toString()); + if (!acquiredLock.isLockAcquired()) { + throw new IOException("Unable to acquire lock for snapshotId: " + snapId); + } + return acquiredLock; + } + private static final class LockDataProviderInitResult { private final OmSnapshotLocalData snapshotLocalData; private final HierarchicalResourceLock lock; @@ -338,6 +355,34 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } + /** + * Provides a snapshot's LocalData metadata stored in memory, after acquiring a read lock on the snapshot. + */ + public final class ReadableOmSnapshotLocalDataMetaProvider implements AutoCloseable { + private final SnapshotVersionsMeta meta; + private final HierarchicalResourceLock lock; + private boolean closed; + + private ReadableOmSnapshotLocalDataMetaProvider(UUID snapshotId) throws IOException { + this.lock = acquireLock(snapshotId, true); + this.meta = versionNodeMap.get(snapshotId); + this.closed = false; + } + + public synchronized SnapshotVersionsMeta getMeta() throws IOException { + if (closed) { + throw new IOException("Resource has already been closed."); + } + return meta; + } + + @Override + public synchronized void close() throws IOException { + closed = true; + lock.close(); + } + } + /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -420,15 +465,6 @@ public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IO return previousSnapshotLocalData; } - private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) throws IOException { - HierarchicalResourceLock acquiredLock = readLock ? locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, - snapId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapId.toString()); - if (!acquiredLock.isLockAcquired()) { - throw new IOException("Unable to acquire lock for snapshotId: " + snapId); - } - return acquiredLock; - } - /** * Initializes the snapshot local data by acquiring the lock on the snapshot, and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. @@ -761,7 +797,10 @@ public String toString() { } } - static final class SnapshotVersionsMeta { + /** + * Class that encapsulates the metadata corresponding to a snapshot's local data.
+ */ + public static final class SnapshotVersionsMeta { private final UUID previousSnapshotId; private final Map snapshotVersions; private int version; @@ -783,16 +822,16 @@ private Map getVersionNodes(OmSnapshotLocalData s return versionNodes; } - UUID getPreviousSnapshotId() { + public UUID getPreviousSnapshotId() { return previousSnapshotId; } - int getVersion() { + public int getVersion() { return version; } - Map getSnapshotVersions() { - return snapshotVersions; + private Map getSnapshotVersions() { + return Collections.unmodifiableMap(snapshotVersions); } LocalDataVersionNode getVersionNode(int snapshotVersion) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 6ec49935b356..f6b8df609d99 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -314,7 +314,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { when(mockedDb.getLiveFilesMetaData()).thenReturn(mockedLiveFiles); Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); - when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo).toFile()); + when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo, 0).toFile()); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); assertEquals(0, Files.size(snapshotYaml)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 2cafae138fd4..6bef4b84247b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -131,7 +131,7 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); // Confirm snapshot directory was created - String snapshotDir = getSnapshotPath(ozoneConfiguration, snapshotInfo); + String snapshotDir = getSnapshotPath(ozoneConfiguration, snapshotInfo, 0); assertTrue((new File(snapshotDir)).exists()); // Confirm table has 1 entry diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index 2d5d7b2870f7..bdb23b65f2c8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -117,7 +117,7 @@ public void testAddToDBBatch() throws Exception { // Confirm snapshot directory was created String snapshotDir = OmSnapshotManager.getSnapshotPath(ozoneConfiguration, - snapshotInfo); + snapshotInfo, 0); assertTrue((new File(snapshotDir)).exists()); // Confirm table has 1 entry diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 947c1a4b7f47..43ebe6fbeb5b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -250,7 +250,7 @@ private List createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLo private void mockSnapshotStore(UUID snapshotId, List sstFiles) throws RocksDatabaseException { // Setup snapshot store mock - File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0).toFile(); assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); @@ -580,7 +580,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); // Setup snapshot store mock - File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0).toFile(); assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); List sstFiles = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index 3f53a66f4f95..e62b64893254 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -93,7 +93,6 @@ private SnapshotInfo createSnapshotInfo(UUID snapshotID, .setPathPreviousSnapshotId(pathPrevID) .setGlobalPreviousSnapshotId(globalPrevID) .setSnapshotPath(String.join("/", "vol1", "bucket1")) - .setCheckpointDir("checkpoint.testdir") .build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 0ea625a0e064..ec896cb3dda3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -289,7 +289,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .setBucketName(BUCKET_NAME) .setName(baseSnapshotName) .setSnapshotPath(snapshotPath) - .setCheckpointDir(snapshotCheckpointDir) .build(); for (JobStatus jobStatus : jobStatuses) { @@ -302,7 +301,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .setBucketName(BUCKET_NAME) .setName(targetSnapshotName) .setSnapshotPath(snapshotPath) - .setCheckpointDir(snapshotCheckpointDir) .build(); SnapshotDiffJob diffJob = new SnapshotDiffJob(System.currentTimeMillis(), @@ -1395,7 +1393,6 @@ public void testThreadPoolIsFull(String description, .setBucketName(BUCKET_NAME) .setName(snapshotName) 
.setSnapshotPath("fromSnapshotPath") - .setCheckpointDir("fromSnapshotCheckpointDir") .build(); snapshotInfos.add(snapInfo); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index ca27d9bc8938..a39d907038fb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -75,7 +75,6 @@ private SnapshotInfo createSnapshotInfo() { .setPathPreviousSnapshotId(EXPECTED_PREVIOUS_SNAPSHOT_ID) .setGlobalPreviousSnapshotId(EXPECTED_PREVIOUS_SNAPSHOT_ID) .setSnapshotPath("test/path") - .setCheckpointDir("checkpoint.testdir") .build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java index d9e81693dd8d..e7c98e298b18 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java @@ -225,7 +225,7 @@ protected Path createSnapshotCheckpoint(String volume, String bucket, String sna RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); + checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotInfo.getSnapshotId(), 0)); // Check the DB is still there assertTrue(Files.exists(snapshotDirPath)); return snapshotDirPath; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java index e523f32ef7e2..108dd30c8222 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java @@ -219,7 +219,7 @@ public void testIrrelevantSstFileDeletion() .get(SnapshotInfo.getTableKey(volumeName, bucketName2, snapshotName1)); String snapshotDirName = - OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); + OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, 0); for (LiveFileMetaData file : allFiles) { //Skipping the previous files from this check even those also works. 
@@ -294,11 +294,11 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { SnapshotInfo snapshot1Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap1")); File snapshot1Dir = - new File(OmSnapshotManager.getSnapshotPath(conf, snapshot1Info)); + new File(OmSnapshotManager.getSnapshotPath(conf, snapshot1Info, 0)); SnapshotInfo snapshot2Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap2")); File snapshot2Dir = - new File(OmSnapshotManager.getSnapshotPath(conf, snapshot2Info)); + new File(OmSnapshotManager.getSnapshotPath(conf, snapshot2Info, 0)); File snap1Current = new File(snapshot1Dir, "CURRENT"); File snap2Current = new File(snapshot2Dir, "CURRENT"); From 36b6fb357de7668aa946b516dde375eb6ab4aca5 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 28 Oct 2025 08:01:07 -0400 Subject: [PATCH 074/126] HDDS-13830. Add test Change-Id: Ica7f0f89f8e9f5c6531709c949b6e9fa6b7c4da1 --- .../ozone/om/TestOmSnapshotManager.java | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index f6b8df609d99..116d78019b75 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -42,7 +42,9 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -95,6 +97,9 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.MockedStatic; import org.rocksdb.LiveFileMetaData; import org.slf4j.event.Level; @@ -741,6 +746,43 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti destAddNonSstToCopiedFiles); } + @ParameterizedTest + @ValueSource(ints = {0, 1, 10, 100}) + public void testGetSnapshotPath(int version) { + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + RDBStore store = mock(RDBStore.class); + when(metadataManager.getStore()).thenReturn(store); + File file = new File("test-db"); + when(store.getDbLocation()).thenReturn(file); + String path = "dir1/dir2"; + when(store.getSnapshotsParentDir()).thenReturn(path); + UUID snapshotId = UUID.randomUUID(); + String snapshotPath = OmSnapshotManager.getSnapshotPath(metadataManager, snapshotId, version).toString(); + String expectedPath = "dir1/dir2/test-db-" + snapshotId; + if (version != 0) { + expectedPath = expectedPath + "-" + version; + } + assertEquals(expectedPath, snapshotPath); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 10, 100}) + public void testGetSnapshotPathFromConf(int version) { + try (MockedStatic mocked = mockStatic(OMStorage.class)) { + String omDir = "dir1/dir2"; + mocked.when(() -> 
OMStorage.getOmDbDir(any())).thenReturn(new File(omDir)); + OzoneConfiguration conf = mock(OzoneConfiguration.class); + SnapshotInfo snapshotInfo = createSnapshotInfo("volumeName", "bucketname"); + String snapshotPath = OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, version); + String expectedPath = omDir + OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + + OM_DB_NAME + "-" + snapshotInfo.getSnapshotId(); + if (version != 0) { + expectedPath = expectedPath + "-" + version; + } + assertEquals(expectedPath, snapshotPath); + } + } + @Test public void testCreateSnapshotIdempotent() throws Exception { // set up db tables From 45963867aa8e516544f04dec14966448402e1a35 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 29 Oct 2025 01:07:29 -0400 Subject: [PATCH 075/126] HDDS-13783. Add comments for localDataGraph Change-Id: I1bb9832e7e8c40deeccb9d0868eaf5772f39b7f9 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index bfe74822476d..3e92eb6748ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -74,6 +74,23 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); private final ObjectSerializer snapshotLocalDataSerializer; + // In-memory DAG of snapshot-version dependencies. Each node represents a + // specific (snapshotId, version) pair, and a directed edge points to the + // corresponding (previousSnapshotId, previousSnapshotVersion) it depends on. + // The durable state is stored in each snapshot's YAML (previousSnapshotId and + // VersionMeta.previousSnapshotVersion). This graph mirrors that persisted + // structure to validate adds/removes and to resolve versions across chains. + // This graph is maintained only in memory and is not persisted to disk. + // Example (linear chain, arrows point to previous): + // (S0, v1) <- (S1, v4) <- (S2, v5) <- (S3, v7) + // where each node is (snapshotId, version) and each arrow points to its + // corresponding (previousSnapshotId, previousSnapshotVersion) dependency. + // + // Example (multiple versions for a single snapshotId S2): + // (S1, v4) <- (S2, v6) <- (S3, v8) + // (S1, v3) <- (S2, v5) + // Here S2 has two distinct versions (v6 and v5), each represented as its own + // node, and each version can depend on a different previousSnapshotVersion on S1. private final MutableGraph localDataGraph; private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; From fd4bfdb2213f3372712cca212d346df6797fd450 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 17:43:29 -0400 Subject: [PATCH 076/126] HDDS-13785. 
Add test for handling needs defrag Change-Id: I2af63982b9e5e9c42fbb54ac39366d2d66e563d2 --- .../ozone/om/SnapshotDefragService.java | 5 ++- .../snapshot/OmSnapshotLocalDataManager.java | 18 ++++++++-- .../TestOmSnapshotLocalDataManager.java | 33 +++++++++++++++++-- 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 87f6ff55bb71..799ac6a40773 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -137,9 +137,8 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); // Check if snapshot needs compaction (defragmentation) - boolean needsDefrag = snapshotLocalData.getNeedsDefrag(); - LOG.debug("Snapshot {} needsDefragmentation field value: {}", - snapshotInfo.getName(), needsDefrag); + boolean needsDefrag = readableOmSnapshotLocalDataProvider.needsDefrag(); + LOG.debug("Snapshot {} needsDefragmentation field value: {}", snapshotInfo.getName(), needsDefrag); return needsDefrag; } catch (IOException e) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index b081317cc57e..f38f1148ff76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -170,8 +170,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId(), - null), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), + snapshotInfo.getPathPreviousSnapshotId(), null), null))) { snapshotLocalData.commit(); } @@ -674,6 +674,19 @@ private LockDataProviderInitResult initialize( } } + public boolean needsDefrag() { + if (snapshotLocalData.getNeedsDefrag()) { + return true; + } + if (resolvedPreviousSnapshotId != null) { + int snapshotVersion = snapshotLocalData.getVersion(); + int previousResolvedSnapshotVersion = snapshotLocalData.getVersionSstFileInfos().get(snapshotVersion) + .getPreviousSnapshotVersion(); + return previousResolvedSnapshotVersion < getVersionNodeMap().get(resolvedPreviousSnapshotId).getVersion(); + } + return false; + } + @Override public void close() throws IOException { if (previousLock != null) { @@ -756,6 +769,7 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + this.getSnapshotLocalData().setNeedsDefrag(false); // Set Dirty if a version is added. 
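// A worked example of the rule needsDefrag() implements above, in the (snapshotId, version)
// notation used elsewhere in this class (the numbers are illustrative): suppose S2's current
// version was produced against S1 at previousSnapshotVersion = 3, but S1 has since been
// defragged up to version 4. Then even though S2's own flag is false,
//
//   prevSeen (3) < getVersionNodeMap().get(S1).getVersion() (4)
//
// so needsDefrag() returns true and S2 is picked up for another defrag pass.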
setDirty(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index d4233591be34..cd442b9cdd38 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -31,6 +31,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.reset; @@ -124,7 +125,7 @@ public class TestOmSnapshotLocalDataManager { private AutoCloseable mocks; private File snapshotsDir; - private MockedStatic snapshotUtilMock; + private MockedStatic snapshotUtilMock; private static final String READ_LOCK_MESSAGE_ACQUIRE = "readLock acquire"; private static final String READ_LOCK_MESSAGE_UNLOCK = "readLock unlock"; @@ -169,7 +170,7 @@ public void setUp() throws IOException { when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); - this.snapshotUtilMock = mockStatic(SnapshotUtils.class); + this.snapshotUtilMock = mockStatic(OmSnapshotManager.class, CALLS_REAL_METHODS); purgedSnapshotIdMap.clear(); snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); @@ -519,6 +520,32 @@ public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) thro } } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testWriteWithChainUpdate(boolean previousSnapshotExisting) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + List snapshotIds = createSnapshotLocalData(localDataManager, 3 + (previousSnapshotExisting ? 1 : 0)); + int snapshotIdx = 1 + (previousSnapshotExisting ? 1 : 0); + for (UUID snapshotId : snapshotIds) { + addVersionsToLocalData(localDataManager, snapshotId, ImmutableMap.of(1, 1)); + } + + UUID snapshotId = snapshotIds.get(snapshotIdx); + UUID toUpdatePreviousSnapshotId = snapshotIdx - 2 >= 0 ? snapshotIds.get(snapshotIdx - 2) : null; + + try (WritableOmSnapshotLocalDataProvider snap = + localDataManager.getWritableOmSnapshotLocalData(snapshotId, toUpdatePreviousSnapshotId)) { + assertFalse(snap.needsDefrag()); + snap.commit(); + assertTrue(snap.needsDefrag()); + } + try (ReadableOmSnapshotLocalDataProvider snap = + localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertEquals(toUpdatePreviousSnapshotId, snap.getSnapshotLocalData().getPreviousSnapshotId()); + assertTrue(snap.needsDefrag()); + } + } + /** * Validates write-time version propagation and removal rules when the previous * snapshot already has a concrete version recorded. @@ -717,6 +744,8 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); assertEquals(expectedVersionMeta, versionMeta); + // New Snapshot create needs to be defragged always. 
+ assertTrue(snapshotLocalData.needsDefrag()); } } From 8a297369c4f9da73e6990662d3968034c03b6096 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 17:46:11 -0400 Subject: [PATCH 077/126] HDDS-13833. Fix checkstyle Change-Id: I90e886ac5ca954cdf4fa7110b175168b2facac4b --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 3806fda7ed57..3411c4879ddc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -158,8 +158,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId(), - null), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), + snapshotInfo.getPathPreviousSnapshotId(), null), null))) { snapshotLocalData.commit(); } From a810cc1b4b13636040b0bf248290382f5eccf2e1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 18:45:20 -0400 Subject: [PATCH 078/126] HDDS-13785. Fix findbugs Change-Id: Ic2aa9091a9c463bb238b5af6c347df3d080b28f9 --- .../java/org/apache/hadoop/ozone/om/SnapshotDefragService.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 799ac6a40773..212953cd874c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -134,8 +134,6 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { // Read snapshot local metadata from YAML - OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); - // Check if snapshot needs compaction (defragmentation) boolean needsDefrag = readableOmSnapshotLocalDataProvider.needsDefrag(); LOG.debug("Snapshot {} needsDefragmentation field value: {}", snapshotInfo.getName(), needsDefrag); From 78c103629bd83b748767fcc210becc6b77b177be Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 22:17:02 -0400 Subject: [PATCH 079/126] HDDS-13859. 
OmSnapshotLocalDataManager should handle needsDefrag flag in the yaml file Change-Id: Id4e0082b42ded19a5b05418555478c5a71ae2d1a --- .../ozone/om/SnapshotDefragService.java | 7 +-- .../snapshot/OmSnapshotLocalDataManager.java | 17 +++++++ .../TestOmSnapshotLocalDataManager.java | 49 +++++++++++++++++++ 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 87f6ff55bb71..212953cd874c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -134,12 +134,9 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { // Read snapshot local metadata from YAML - OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); - // Check if snapshot needs compaction (defragmentation) - boolean needsDefrag = snapshotLocalData.getNeedsDefrag(); - LOG.debug("Snapshot {} needsDefragmentation field value: {}", - snapshotInfo.getName(), needsDefrag); + boolean needsDefrag = readableOmSnapshotLocalDataProvider.needsDefrag(); + LOG.debug("Snapshot {} needsDefragmentation field value: {}", snapshotInfo.getName(), needsDefrag); return needsDefrag; } catch (IOException e) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 3411c4879ddc..ef789aa8e1dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -573,6 +573,19 @@ private LockDataProviderInitResult initialize( } } + public boolean needsDefrag() { + if (snapshotLocalData.getNeedsDefrag()) { + return true; + } + if (resolvedPreviousSnapshotId != null) { + int snapshotVersion = snapshotLocalData.getVersion(); + int previousResolvedSnapshotVersion = snapshotLocalData.getVersionSstFileInfos().get(snapshotVersion) + .getPreviousSnapshotVersion(); + return previousResolvedSnapshotVersion < getVersionNodeMap().get(resolvedPreviousSnapshotId).getVersion(); + } + return false; + } + @Override public void close() throws IOException { if (previousLock != null) { @@ -640,6 +653,9 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), existingVersionsMeta.getPreviousSnapshotId())) { setDirty(); + // Set the needsDefrag if the new previous snapshotId is different from the existing one or if this is a new + // snapshot yaml file. 
+ snapshotLocalData.setNeedsDefrag(true); } return versionsToBeAdded; } finally { @@ -652,6 +668,7 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + this.getSnapshotLocalData().setNeedsDefrag(false); // Set Dirty if a version is added. setDirty(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index bfaa48c04feb..869c76270c7c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -431,6 +432,32 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage } } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testWriteWithChainUpdate(boolean previousSnapshotExisting) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 3 + (previousSnapshotExisting ? 1 : 0)); + int snapshotIdx = 1 + (previousSnapshotExisting ? 1 : 0); + for (UUID snapshotId : snapshotIds) { + addVersionsToLocalData(localDataManager, snapshotId, ImmutableMap.of(1, 1)); + } + + UUID snapshotId = snapshotIds.get(snapshotIdx); + UUID toUpdatePreviousSnapshotId = snapshotIdx - 2 >= 0 ? snapshotIds.get(snapshotIdx - 2) : null; + + try (WritableOmSnapshotLocalDataProvider snap = + localDataManager.getWritableOmSnapshotLocalData(snapshotId, toUpdatePreviousSnapshotId)) { + assertFalse(snap.needsDefrag()); + snap.commit(); + assertTrue(snap.needsDefrag()); + } + try (ReadableOmSnapshotLocalDataProvider snap = + localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertEquals(toUpdatePreviousSnapshotId, snap.getSnapshotLocalData().getPreviousSnapshotId()); + assertTrue(snap.needsDefrag()); + } + } + /** * Validates write-time version propagation and removal rules when the previous * snapshot already has a concrete version recorded. 
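The chain-update test above, together with the needsDefrag test that follows, pins down the intended lifecycle, sketched here with illustrative identifiers snapId and newPrevId: re-pointing a snapshot's previousSnapshotId marks it as needing defrag, and only a later addSnapshotVersion() from an actual defrag pass clears the flag again.

    try (WritableOmSnapshotLocalDataProvider writable =
        localDataManager.getWritableOmSnapshotLocalData(snapId, newPrevId)) {
      writable.commit();  // previous pointer changed, so needsDefrag() is now true
    }
    // A later defrag pass calls addSnapshotVersion(snapshotStore), which records the
    // new version against the latest previous version and resets the flag to false.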
@@ -531,6 +558,26 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData } } + @ParameterizedTest + @ValueSource(ints = {1, 2, 3}) + public void testNeedsDefrag(int previousVersion) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 2); + for (UUID snapshotId : snapshotIds) { + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertTrue(snap.needsDefrag()); + } + } + addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(1, 1, 2, 2, 3, 3)); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotIds.get(0))) { + assertFalse(snap.needsDefrag()); + } + addVersionsToLocalData(localDataManager, snapshotIds.get(1), ImmutableMap.of(1, 3, 2, previousVersion)); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotIds.get(1))) { + assertEquals(previousVersion < snap.getPreviousSnapshotLocalData().getVersion(), snap.needsDefrag()); + } + } + @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionResolution(boolean read) throws IOException { @@ -629,6 +676,8 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); assertEquals(expectedVersionMeta, versionMeta); + // New Snapshot create needs to be defragged always. + assertTrue(snapshotLocalData.needsDefrag()); } } From bf4746f1e3f4eb3ee6e28a8a41d0705127253e07 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 07:33:35 -0400 Subject: [PATCH 080/126] HDDS-13859. Fix Test Change-Id: Ib1ab4c81e920f1465c3e9046092cdbf0727bcf7d --- .../java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 6ec49935b356..73f9e2863be3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -330,7 +330,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); assertFalse(localData.getSstFiltered()); assertEquals(0L, localData.getLastDefragTime()); - assertFalse(localData.getNeedsDefrag()); + assertTrue(localData.getNeedsDefrag()); assertEquals(1, localData.getVersionSstFileInfos().size()); // Cleanup From 09d955c199dfb7f646d2bd8697b6064a7f058b00 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 07:49:08 -0400 Subject: [PATCH 081/126] HDDS-13859. 
Add comments Change-Id: Iaea152b0b69dcdd52a6f3806c713ee2b48d3875c --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index ef789aa8e1dd..40e4bf678db0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -668,6 +668,7 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + // Adding a new snapshot version means it has been defragged thus the flag needs to be reset. this.getSnapshotLocalData().setNeedsDefrag(false); // Set Dirty if a version is added. setDirty(); From 2cf1bce8c8d2546b51ef06b15eb910370da46816 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 08:48:08 -0400 Subject: [PATCH 082/126] HDDS-13859. Fix test after merge master Change-Id: I6527b98cd16850079b2f3855f4bfb5a5b59fa3ee --- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index cc39d263bdd2..8554d1684e26 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -691,7 +691,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); assertFalse(localData.getSstFiltered()); assertEquals(0L, localData.getLastDefragTime()); - assertFalse(localData.getNeedsDefrag()); + assertTrue(localData.getNeedsDefrag()); assertEquals(1, localData.getVersionSstFileInfos().size()); } From 5849dacbd5760f4166038f9b4aac2b6f5dfa0c0f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 08:56:52 -0400 Subject: [PATCH 083/126] HDDS-13785. 
Fix tests after merge Change-Id: Ia67b9db375bc694bacce4e4e87d9d04906c3b01c --- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 5816306e7ea2..006c946abacf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import static org.apache.hadoop.hdds.StringUtils.bytes2String; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; @@ -651,7 +652,7 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData @ParameterizedTest @ValueSource(ints = {1, 2, 3}) public void testNeedsDefrag(int previousVersion) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); for (UUID snapshotId : snapshotIds) { try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { @@ -761,7 +762,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { mockedLiveFiles.add(createMockLiveFileMetaData("ot2.sst", "otherTable", "k1", "k2")); mockSnapshotStore(snapshotId, mockedLiveFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); Path snapshotYaml = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); From 519495ac08e1ac29cc50ad6a6cceb7a84ddcb0af Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 18:29:28 -0400 Subject: [PATCH 084/126] HDDS-13785. 
Address review comments Change-Id: I1a672ce523de5c7d8317766d7ce8f49fee3d099e --- .../main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java | 4 ++-- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 6 +++--- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 652a384b2cc9..469900aa8ea7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -678,9 +678,9 @@ public final class OMConfigKeys { public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT = "ozone.om.hierarchical.resource.locks.hard.limit"; public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT = 10000; - public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL = + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL = "ozone.om.snapshot.local.data.manager.service.interval"; - public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m"; + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT = "5m"; /** * Never constructed. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 4833da95c91f..9c3a9d0cdbbc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; @@ -315,7 +315,7 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa increamentOrphanCheckCount(snapshotId); } long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( - OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT, + OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); if (snapshotLocalDataManagerServiceInterval > 0) { this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 006c946abacf..912be56c4bd5 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.StringUtils.bytes2String; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; @@ -177,7 +177,7 @@ public void setUp() throws IOException { purgedSnapshotIdMap.clear(); snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); - conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, -1); + conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, -1); } @AfterEach From c125250bcf4e88ae6372d7d46b1824c6141d3561 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 18:30:20 -0400 Subject: [PATCH 085/126] HDDS-13785. Address review comments Change-Id: I6e817c1d5a27c5c0d8a3cb4a296638057ed21951 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 9c3a9d0cdbbc..9af48367dde7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -265,13 +265,13 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws } } - private void increamentOrphanCheckCount(UUID snapshotId) { + private void incrementOrphanCheckCount(UUID snapshotId) { if (snapshotId != null) { this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 
1 : (v + 1)); } } - private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { + private void decrementOrphanCheckCount(UUID snapshotId, int decrementBy) { this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> { if (v == null) { return null; @@ -312,7 +312,7 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa addVersionNodeWithDependents(snapshotLocalData); } for (UUID snapshotId : versionNodeMap.keySet()) { - increamentOrphanCheckCount(snapshotId); + incrementOrphanCheckCount(snapshotId); } long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT, @@ -337,7 +337,7 @@ private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, Snap UUID snapshotId = entry.getKey(); int countBeforeCheck = entry.getValue(); checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId); - decreamentOrphanCheckCount(snapshotId, countBeforeCheck); + decrementOrphanCheckCount(snapshotId, countBeforeCheck); } } @@ -855,14 +855,14 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, // version removals) if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(), snapshotVersions.getPreviousSnapshotId())) { - increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); + incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); } // If the transactionInfo set this means the snapshot has been purged and the entire yaml file could have // become an orphan if the version is also updated it // could mean that there could be some orphan version present within the // same snapshot. if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { - increamentOrphanCheckCount(snapshotId); + incrementOrphanCheckCount(snapshotId); } } } finally { From ec59b893ecb6262f7f69b6c5551474c1c5e5e9fa Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:17:36 -0400 Subject: [PATCH 086/126] HDDS-13785. 
Address review comments

Change-Id: I6870b5263d104e4179dbc13b24bd923238ff9171
---
 .../om/snapshot/OmSnapshotLocalDataManager.java      | 12 +++++++++---
 .../om/snapshot/TestOmSnapshotLocalDataManager.java  |  2 +-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 9af48367dde7..e6e940290987 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -284,6 +284,7 @@ private void decrementOrphanCheckCount(UUID snapshotId, int decrementBy) {
     });
   }

+  @VisibleForTesting
   Map<UUID, Integer> getSnapshotToBeCheckedForOrphans() {
     return snapshotToBeCheckedForOrphans;
   }
@@ -315,7 +316,8 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa
       incrementOrphanCheckCount(snapshotId);
     }
     long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration(
-        OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT,
+        OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL,
+        OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT,
         TimeUnit.MILLISECONDS);
     if (snapshotLocalDataManagerServiceInterval > 0) {
       this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1);
@@ -344,6 +346,7 @@ private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, Snap
   @VisibleForTesting
   void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager,
       UUID snapshotId) throws IOException {
+    LOG.info("Checking orphan snapshot versions for snapshot {}", snapshotId);
    try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider(
         snapshotId)) {
       OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData();
@@ -361,6 +364,9 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai
             && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion())
             || isSnapshotPurged);
         if (toRemove) {
+          LOG.info("Removing snapshotId: {} version: {} from local data, snapshotLocalDataVersion: {}, "
+              + "snapshotPurged: {}, inDegree: {}", snapshotId, versionEntry.getVersion(),
+              snapshotLocalData.getVersion(), isSnapshotPurged, localDataGraph.inDegree(versionEntry));
           snapshotLocalDataProvider.removeVersion(versionEntry.getVersion());
         }
       } finally {
@@ -857,8 +863,8 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions,
             snapshotVersions.getPreviousSnapshotId())) {
       incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId());
     }
-    // If the transactionInfo set this means the snapshot has been purged and the entire yaml file could have
-    // become an orphan if the version is also updated it
+    // If the transactionInfo is set, this means the snapshot has been purged and the entire YAML file could have
+    // become an orphan. Otherwise, if the version is updated it
     // could mean that there could be some orphan version present within the
     // same snapshot.
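// [Editor's illustration] The incrementOrphanCheckCount/decrementOrphanCheckCount
// pair introduced a few patches above follows the standard Map#compute counter
// idiom. A self-contained sketch with hypothetical names; the drop-at-zero
// behaviour is an assumption, since the tail of the production lambda is not
// shown in this diff:
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

final class OrphanCheckCounterSketch {
  private final ConcurrentHashMap<UUID, Integer> pending = new ConcurrentHashMap<>();

  void increment(UUID snapshotId) {
    if (snapshotId != null) {
      pending.compute(snapshotId, (k, v) -> v == null ? 1 : v + 1);
    }
  }

  // Decrement by the count observed before a sweep, so increments that arrive
  // while the orphan check is running survive into the next iteration.
  void decrement(UUID snapshotId, int decrementBy) {
    pending.compute(snapshotId, (k, v) -> {
      if (v == null) {
        return null;
      }
      int remaining = v - decrementBy;
      return remaining > 0 ? remaining : null; // assumption: entry dropped at zero
    });
  }
}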
if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 912be56c4bd5..60f2cdac1801 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -512,7 +512,7 @@ public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) thro assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId)); localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId); if (purgeSnapshot) { - NoSuchFileException e = assertThrows(NoSuchFileException.class, + assertThrows(NoSuchFileException.class, () -> localDataManager.getOmSnapshotLocalData(secondSnapId)); assertFalse(localDataManager.getVersionNodeMap().containsKey(secondSnapId)); } else { From b0b6d6a3a6b3eb00d253f4035934991326aa2ca9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:31:51 -0400 Subject: [PATCH 087/126] HDDS-13785. Address review comments Change-Id: I5fb8bcc7beb9941e67df0d02a40b09e30a4c3880 --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index e6e940290987..0ae6174974a8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -372,7 +372,11 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai } finally { internalLock.readLock().unlock(); } - + } + // If Snapshot is purged but not flushed completely to disk then this needs to wait for the next iteration + // which can be done by incrementing the orphan check count for the snapshotId. + if (!snapshotLocalData.getVersionSstFileInfos().isEmpty() && snapshotLocalData.getTransactionInfo() != null) { + incrementOrphanCheckCount(snapshotId); } snapshotLocalDataProvider.commit(); } From a7598071309691dd91c496999d91c59380d55cda Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:36:00 -0400 Subject: [PATCH 088/126] HDDS-13785. 
Change catch exception Change-Id: I30f586045a567b0e0e3200a449d7627d96f59dd5 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 0ae6174974a8..e08ee2830947 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -325,7 +325,7 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa () -> { try { checkOrphanSnapshotVersions(omMetadataManager, chainManager); - } catch (IOException e) { + } catch (Exception e) { LOG.error("Exception while checking orphan snapshot versions", e); } }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS); From 808b17484d985fc25c670a77eb0920a685afcd2b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:38:09 -0400 Subject: [PATCH 089/126] HDDS-13785. Address review comments Change-Id: Ia13da4aa1fb25768b0dac64fd4203ed1ca596284 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index e08ee2830947..33c43eda0234 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -820,7 +820,7 @@ public synchronized void commit() throws IOException { Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); } else if (snapshotLocalDataFile.exists()) { - LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}", + LOG.info("Deleting YAML file corresponding to snapshotId: {} in path : {}", super.snapshotId, snapshotLocalDataFile.getAbsolutePath()); if (!snapshotLocalDataFile.delete()) { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); From c829a8be16375e8ca863bdae84278c4f31861507 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 01:04:59 -0400 Subject: [PATCH 090/126] HDDS-13830. 
Fix test Change-Id: I88f541c712d847515f6ef5b1d094cc5168936966 --- .../hadoop/ozone/client/OzoneSnapshot.java | 18 ++++++++++++++++++ .../hadoop/ozone/client/TestOzoneSnapshot.java | 4 +++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index 95f05a50e064..360fd4cef6da 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -222,4 +222,22 @@ public int hashCode() { return Objects.hash(volumeName, bucketName, name, creationTime, snapshotStatus, snapshotId, snapshotPath, checkpointDir, referencedSize, referencedReplicatedSize, exclusiveSize, exclusiveReplicatedSize); } + + @Override + public String toString() { + return "OzoneSnapshot{" + + "bucketName='" + bucketName + '\'' + + ", volumeName='" + volumeName + '\'' + + ", name='" + name + '\'' + + ", creationTime=" + creationTime + + ", snapshotStatus=" + snapshotStatus + + ", snapshotId=" + snapshotId + + ", snapshotPath='" + snapshotPath + '\'' + + ", checkpointDir='" + checkpointDir + '\'' + + ", referencedSize=" + referencedSize + + ", referencedReplicatedSize=" + referencedReplicatedSize + + ", exclusiveSize=" + exclusiveSize + + ", exclusiveReplicatedSize=" + exclusiveReplicatedSize + + '}'; + } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 16cf58ab5a2c..0ddacef84acf 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; import java.util.UUID; @@ -41,6 +42,7 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { when(snapshotInfo.getSnapshotStatus()).thenReturn(SNAPSHOT_ACTIVE); when(snapshotInfo.getSnapshotId()).thenReturn(snapshotId); when(snapshotInfo.getSnapshotPath()).thenReturn("volume/bucket"); + when(snapshotInfo.getCheckpointDirName(eq(0))).thenReturn("checkpointDir"); when(snapshotInfo.getReferencedSize()).thenReturn(1000L); when(snapshotInfo.getReferencedReplicatedSize()).thenReturn(3000L); when(snapshotInfo.getExclusiveSize()).thenReturn(4000L); @@ -57,7 +59,7 @@ public void testOzoneSnapshotFromSnapshotInfo() { OzoneSnapshot ozoneSnapshot = OzoneSnapshot.fromSnapshotInfo(snapshotInfo); OzoneSnapshot expectedOzoneSnapshot = new OzoneSnapshot( "volume", "bucket", "snap", 1000L, SNAPSHOT_ACTIVE, snapshotId, - "volume/bucket", OM_SNAPSHOT_SEPARATOR + snapshotId, 1000L, 3000L, 6000L, 18000L); + "volume/bucket", "checkpointDir", 1000L, 3000L, 6000L, 18000L); assertEquals(expectedOzoneSnapshot, ozoneSnapshot); } } From bfd341c692f86f765477f119c149ce267c7fcdd3 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 01:09:55 -0400 Subject: [PATCH 091/126] HDDS-13830. 
Fix test Change-Id: I5541c7f931bb0b60af238102b5afc8086be53a6c --- .../java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 0ddacef84acf..028e937a9c2e 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client; -import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.eq; From 4ccd3fcf3b37a919c946e37e78380ebbd74b7137 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 07:35:23 -0400 Subject: [PATCH 092/126] HDDS-13830. Fix test Change-Id: I7077460d4b9d87d460244e3df51d51f078a83970 --- .../org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java | 1 - .../interface-client/src/main/proto/OmClientProtocol.proto | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 7750b32e2e0a..64946e52e886 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -85,7 +85,6 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() { .setPathPreviousSnapshotID(toProtobuf(PATH_PREVIOUS_SNAPSHOT_ID)) .setGlobalPreviousSnapshotID(toProtobuf(GLOBAL_PREVIOUS_SNAPSHOT_ID)) .setSnapshotPath(SNAPSHOT_PATH) - .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) .setDeepClean(false) .setSstFiltered(false) diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 61a3c1d6792e..1e5675f612e6 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -880,7 +880,7 @@ message SnapshotInfo { optional hadoop.hdds.UUID pathPreviousSnapshotID = 8; optional hadoop.hdds.UUID globalPreviousSnapshotID = 9; optional string snapshotPath = 10; - optional string checkpointDir = 11; + optional string checkpointDir = 11 [deprecated = true]; optional int64 dbTxSequenceNumber = 12; optional bool deepClean = 13; optional bool sstFiltered = 14; From 4fd3b0e1ba6d407dd62cd5fce567db79c5729b2b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 17:20:00 -0400 Subject: [PATCH 093/126] HDDS-13849. 
Refactor getTablePrefix function in SnapshotDiff flow Change-Id: If068daeb7ca9bf3234614a29da90edcb9fb5778d --- .../org/apache/hadoop/hdds/StringUtils.java | 7 + .../hadoop/hdds/utils/db/RocksDatabase.java | 10 +- .../hadoop/hdds/utils/db/TablePrefixInfo.java | 52 ++++ .../hadoop/hdds/utils/db/package-info.java | 21 ++ .../apache/ozone/rocksdb/util/RdbUtil.java | 10 +- .../ozone/rocksdiff/DifferSnapshotInfo.java | 10 +- .../rocksdiff/RocksDBCheckpointDiffer.java | 22 +- .../ozone/rocksdiff/RocksDiffUtils.java | 55 ++--- .../TestRocksDBCheckpointDiffer.java | 232 ++++++++++-------- .../ozone/rocksdiff/TestRocksDiffUtils.java | 114 +++++---- .../hadoop/ozone/freon/TestOMSnapshotDAG.java | 26 +- .../hadoop/ozone/om/OMMetadataManager.java | 11 + .../hadoop/ozone/om/KeyManagerImpl.java | 25 +- .../ozone/om/OmMetadataManagerImpl.java | 65 +++++ .../hadoop/ozone/om/OmSnapshotManager.java | 72 +----- .../hadoop/ozone/om/SstFilteringService.java | 13 +- .../OMSnapshotMoveTableKeysRequest.java | 24 +- .../ozone/om/snapshot/OmSnapshotUtils.java | 2 +- .../om/snapshot/SnapshotDiffManager.java | 71 ++---- .../ozone/om/snapshot/SnapshotUtils.java | 25 -- .../om/snapshot/TestSnapshotDiffManager.java | 55 +++-- .../ozone/repair/ldb/TestLdbRepair.java | 2 +- 22 files changed, 512 insertions(+), 412 deletions(-) create mode 100644 hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/TablePrefixInfo.java create mode 100644 hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java index cfcac1a77121..a3bd1e62ffcd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java @@ -123,4 +123,11 @@ public static String getLexicographicallyHigherString(String val) { charVal[lastIdx] += 1; return String.valueOf(charVal); } + + public static String getFirstNChars(String str, int n) { + if (str == null || str.length() < n) { + return str; + } + return str.substring(0, n); + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index b93626060c80..64bbb3711014 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -854,18 +854,14 @@ private int getLastLevel() throws RocksDatabaseException { /** * Deletes sst files which do not correspond to prefix * for given table. - * @param prefixPairs a map of TableName to prefixUsed. + * @param prefixInfo a map of TableName to prefixUsed. */ - public void deleteFilesNotMatchingPrefix(Map prefixPairs) throws RocksDatabaseException { + public void deleteFilesNotMatchingPrefix(TablePrefixInfo prefixInfo) throws RocksDatabaseException { try (UncheckedAutoCloseable ignored = acquire()) { for (LiveFileMetaData liveFileMetaData : getSstFileList()) { String sstFileColumnFamily = StringUtils.bytes2String(liveFileMetaData.columnFamilyName()); int lastLevel = getLastLevel(); - if (!prefixPairs.containsKey(sstFileColumnFamily)) { - continue; - } - // RocksDB #deleteFile API allows only to delete the last level of // SST Files. 
Any level < last level won't get deleted and
       // only last file of level 0 can be deleted
@@ -876,7 +872,7 @@
           continue;
         }

-        String prefixForColumnFamily = prefixPairs.get(sstFileColumnFamily);
+        String prefixForColumnFamily = prefixInfo.getTablePrefix(sstFileColumnFamily);
         String firstDbKey = StringUtils.bytes2String(liveFileMetaData.smallestKey());
         String lastDbKey = StringUtils.bytes2String(liveFileMetaData.largestKey());
         boolean isKeyWithPrefixPresent = RocksDiffUtils.isKeyWithPrefixPresent(
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/TablePrefixInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/TablePrefixInfo.java
new file mode 100644
index 000000000000..65d88962362e
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/TablePrefixInfo.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Encapsulates a store's prefix info corresponding to tables in a db.
+ */
+public class TablePrefixInfo {
+  private final Map<String, String> tablePrefixes;
+
+  public TablePrefixInfo(Map<String, String> tablePrefixes) {
+    this.tablePrefixes = Collections.unmodifiableMap(tablePrefixes);
+  }
+
+  public String getTablePrefix(String tableName) {
+    return tablePrefixes.getOrDefault(tableName, "");
+  }
+
+  public int size() {
+    return tablePrefixes.size();
+  }
+
+  public Set<String> getTableNames() {
+    return tablePrefixes.keySet();
+  }
+
+  @Override
+  public String toString() {
+    return "TablePrefixInfo{" +
+        "tablePrefixes=" + tablePrefixes +
+        '}';
+  }
+}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
new file mode 100644
index 000000000000..48b831ecb8f7
--- /dev/null
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Util package for rocksdb.
+ */
+package org.apache.hadoop.hdds.utils.db;
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java
index 97eaa945fdce..95c4a4aa2bb3 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java
@@ -17,7 +17,6 @@

 package org.apache.ozone.rocksdb.util;

-import com.google.common.collect.Sets;
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
@@ -43,21 +42,20 @@ public final class RdbUtil {

   private RdbUtil() { }

   public static List<LiveFileMetaData> getLiveSSTFilesForCFs(
-      final ManagedRocksDB rocksDB, List<String> cfs) {
-    final Set<String> cfSet = Sets.newHashSet(cfs);
+      final ManagedRocksDB rocksDB, Set<String> cfs) {
     return rocksDB.get().getLiveFilesMetaData().stream()
-        .filter(lfm -> cfSet.contains(StringUtils.bytes2String(lfm.columnFamilyName())))
+        .filter(lfm -> cfs.contains(StringUtils.bytes2String(lfm.columnFamilyName())))
         .collect(Collectors.toList());
   }

   public static Set<String> getSSTFilesForComparison(
-      final ManagedRocksDB rocksDB, List<String> cfs) {
+      final ManagedRocksDB rocksDB, Set<String> cfs) {
     return getLiveSSTFilesForCFs(rocksDB, cfs).stream()
         .map(lfm -> new File(lfm.path(), lfm.fileName()).getPath())
         .collect(Collectors.toCollection(HashSet::new));
   }

-  public static Map<Object, String> getSSTFilesWithInodesForComparison(final ManagedRocksDB rocksDB, List<String> cfs)
+  public static Map<Object, String> getSSTFilesWithInodesForComparison(final ManagedRocksDB rocksDB, Set<String> cfs)
       throws IOException {
     List<LiveFileMetaData> liveSSTFilesForCFs = getLiveSSTFilesForCFs(rocksDB, cfs);
     Map<Object, String> inodeToSstMap = new HashMap<>();
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java
index 501725ca7c2a..c72f56d5f116 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java
@@ -17,8 +17,8 @@

 package org.apache.ozone.rocksdiff;

-import java.util.Map;
 import java.util.UUID;
+import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;

 /**
@@ -29,17 +29,17 @@ public class DifferSnapshotInfo {

   private final UUID snapshotId;
   private final long snapshotGeneration;
-  private final Map<String, String> tablePrefixes;
+  private final TablePrefixInfo tablePrefixes;
   private final ManagedRocksDB rocksDB;

   public DifferSnapshotInfo(String db, UUID id, long gen,
-      Map<String, String> prefixes,
+      TablePrefixInfo tablePrefixInfo,
       ManagedRocksDB rocksDB) {
     dbPath = db;
     snapshotId = id;
     snapshotGeneration = gen;
-    tablePrefixes = prefixes;
+    tablePrefixes = tablePrefixInfo;
     this.rocksDB = rocksDB;
   }

@@ -55,7 +55,7 @@ public long getSnapshotGeneration() {
     return snapshotGeneration;
   }

-  public Map<String, String>
getTablePrefixes() {
+  public TablePrefixInfo getTablePrefixes() {
     return tablePrefixes;
   }
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 184b1d1fc42a..1023efdacc3c 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -32,6 +32,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
 import com.google.common.graph.MutableGraph;
 import com.google.protobuf.InvalidProtocolBufferException;
 import java.io.BufferedWriter;
@@ -617,10 +618,10 @@ private String trimSSTFilename(String filename) {
    * @param rocksDB open rocksDB instance.
    * @return a list of SST files (without extension) in the DB.
    */
-  public Set<String> readRocksDBLiveFiles(ManagedRocksDB rocksDB) {
+  public Set<String> readRocksDBLiveFiles(ManagedRocksDB rocksDB, Set<String> tableFilter) {
     HashSet<String> liveFiles = new HashSet<>();

-    final List<String> cfs = Arrays.asList(
+    final Set<String> cfs = Sets.newHashSet(
         org.apache.hadoop.hdds.StringUtils.bytes2String(
             RocksDB.DEFAULT_COLUMN_FAMILY),
         "keyTable", "directoryTable", "fileTable");
@@ -630,6 +631,9 @@
         RdbUtil.getLiveSSTFilesForCFs(rocksDB, cfs);
     LOG.debug("SST File Metadata for DB: " + rocksDB.get().getName());
     for (LiveFileMetaData m : liveFileMetaDataList) {
+      if (!tableFilter.contains(StringUtils.bytes2String(m.columnFamilyName()))) {
+        continue;
+      }
       LOG.debug("File: {}, Level: {}", m.fileName(), m.level());
       final String trimmedFilename = trimSSTFilename(m.fileName());
       liveFiles.add(trimmedFilename);
@@ -820,10 +824,10 @@ private String getSSTFullPath(String sstFilenameWithoutExtension,
    *     "/path/to/sstBackupDir/000060.sst"]
    */
   public synchronized Optional<List<String>> getSSTDiffListWithFullPath(DifferSnapshotInfo src,
-      DifferSnapshotInfo dest,
+      DifferSnapshotInfo dest, Set<String> tablesToLookup,
       String sstFilesDirForSnapDiffJob) {

-    Optional<List<String>> sstDiffList = getSSTDiffList(src, dest);
+    Optional<List<String>> sstDiffList = getSSTDiffList(src, dest, tablesToLookup);

     return sstDiffList.map(diffList -> diffList.stream()
         .map(
@@ -850,12 +854,12 @@ public synchronized Optional<List<String>> getSSTDiffListWithFullPath(DifferSnap
    * @return A list of SST files without extension. e.g. ["000050", "000060"]
    */
   public synchronized Optional<List<String>> getSSTDiffList(DifferSnapshotInfo src,
-      DifferSnapshotInfo dest) {
+      DifferSnapshotInfo dest, Set<String> tablesToLookup) {

     // TODO: Reject or swap if dest is taken after src, once snapshot chain
     //  integration is done.
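// [Editor's illustration] Before any compaction-DAG traversal, getSSTDiffList
// reduces the problem with plain set arithmetic over live SST file names. A
// self-contained, simplified sketch of that step (the DAG expansion of the
// differing files, and the tablesToLookup column-family filter, are omitted):
import java.util.HashSet;
import java.util.Set;

final class SstSetDiffSketch {
  // Files live in both snapshots need no diffing; files live only in the source
  // snapshot are the starting candidates the differ then walks through the DAG.
  static Set<String> diffCandidates(Set<String> srcSnapFiles, Set<String> destSnapFiles) {
    Set<String> candidates = new HashSet<>(srcSnapFiles);
    candidates.removeAll(destSnapFiles);
    return candidates;
  }
}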
-    Set<String> srcSnapFiles = readRocksDBLiveFiles(src.getRocksDB());
-    Set<String> destSnapFiles = readRocksDBLiveFiles(dest.getRocksDB());
+    Set<String> srcSnapFiles = readRocksDBLiveFiles(src.getRocksDB(), tablesToLookup);
+    Set<String> destSnapFiles = readRocksDBLiveFiles(dest.getRocksDB(), tablesToLookup);

     Set<String> fwdDAGSameFiles = new HashSet<>();
     Set<String> fwdDAGDifferentFiles = new HashSet<>();
@@ -891,9 +895,9 @@ public synchronized Optional<List<String>> getSSTDiffList(DifferSnapshotInfo src
       }
     }

-    if (src.getTablePrefixes() != null && !src.getTablePrefixes().isEmpty()) {
+    if (src.getTablePrefixes() != null && src.getTablePrefixes().size() != 0) {
       RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes(),
-          compactionDag.getCompactionMap(), src.getRocksDB(), dest.getRocksDB());
+          compactionDag.getCompactionMap(), tablesToLookup, src.getRocksDB(), dest.getRocksDB());
     }
     return Optional.of(new ArrayList<>(fwdDAGDifferentFiles));
   }
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
index 86577147b62b..7d9512768bc1 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java
@@ -17,7 +17,7 @@

 package org.apache.ozone.rocksdiff;

-import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.hdds.StringUtils.getFirstNChars;

 import com.google.common.annotations.VisibleForTesting;
 import java.util.Collections;
@@ -25,9 +25,8 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
-import org.apache.commons.collections4.MapUtils;
 import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.utils.db.TablePrefixInfo;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
 import org.apache.ozone.compaction.log.CompactionFileInfo;
 import org.apache.ozone.rocksdb.util.SstFileInfo;
@@ -49,41 +48,26 @@ private RocksDiffUtils() {
   }

   public static boolean isKeyWithPrefixPresent(String prefixForColumnFamily,
       String firstDbKey,
       String lastDbKey) {
-    String firstKeyPrefix = constructBucketKey(firstDbKey);
-    String endKeyPrefix = constructBucketKey(lastDbKey);
+    String firstKeyPrefix = getFirstNChars(firstDbKey, prefixForColumnFamily.length());
+    String endKeyPrefix = getFirstNChars(lastDbKey, prefixForColumnFamily.length());
     return firstKeyPrefix.compareTo(prefixForColumnFamily) <= 0
         && prefixForColumnFamily.compareTo(endKeyPrefix) <= 0;
   }

-  public static String constructBucketKey(String keyName) {
-    if (!keyName.startsWith(OM_KEY_PREFIX)) {
-      keyName = OM_KEY_PREFIX.concat(keyName);
-    }
-    String[] elements = keyName.split(OM_KEY_PREFIX);
-    String volume = elements[1];
-    String bucket = elements[2];
-    StringBuilder builder =
-        new StringBuilder().append(OM_KEY_PREFIX).append(volume);
-
-    if (StringUtils.isNotBlank(bucket)) {
-      builder.append(OM_KEY_PREFIX).append(bucket);
-    }
-    builder.append(OM_KEY_PREFIX);
-    return builder.toString();
-  }
-
   public static void filterRelevantSstFiles(Set<String> inputFiles,
-      Map<String, String> tableToPrefixMap,
+      TablePrefixInfo tablePrefixInfo,
+      Set<String> columnFamiliesToLookup,
       ManagedRocksDB...
dbs) { - filterRelevantSstFiles(inputFiles, tableToPrefixMap, Collections.emptyMap(), dbs); + filterRelevantSstFiles(inputFiles, tablePrefixInfo, Collections.emptyMap(), columnFamiliesToLookup, dbs); } /** * Filter sst files based on prefixes. */ public static void filterRelevantSstFiles(Set inputFiles, - Map tableToPrefixMap, + TablePrefixInfo tablePrefixInfo, Map preExistingCompactionNodes, + Set columnFamiliesToLookup, ManagedRocksDB... dbs) { Map liveFileMetaDataMap = new HashMap<>(); int dbIdx = 0; @@ -100,41 +84,38 @@ public static void filterRelevantSstFiles(Set inputFiles, compactionNode = new CompactionNode(new CompactionFileInfo.Builder(filename) .setValues(liveFileMetaDataMap.get(filename)).build()); } - if (shouldSkipNode(compactionNode, tableToPrefixMap)) { + if (shouldSkipNode(compactionNode, tablePrefixInfo, columnFamiliesToLookup)) { fileIterator.remove(); } } } @VisibleForTesting - static boolean shouldSkipNode(SstFileInfo node, - Map columnFamilyToPrefixMap) { + static boolean shouldSkipNode(SstFileInfo node, TablePrefixInfo tablePrefixInfo, Set columnFamiliesToLookup) { // This is for backward compatibility. Before the compaction log table // migration, startKey, endKey and columnFamily information is not persisted // in compaction log files. // Also for the scenario when there is an exception in reading SST files // for the file node. - if (node.getStartKey() == null || node.getEndKey() == null || - node.getColumnFamily() == null) { + if (node.getStartKey() == null || node.getEndKey() == null || node.getColumnFamily() == null) { LOG.debug("Compaction node with fileName: {} doesn't have startKey, " + "endKey and columnFamily details.", node.getFileName()); return false; } - if (MapUtils.isEmpty(columnFamilyToPrefixMap)) { - LOG.debug("Provided columnFamilyToPrefixMap is null or empty."); + if (tablePrefixInfo.size() == 0) { + LOG.debug("Provided tablePrefixInfo is null or empty."); return false; } - if (!columnFamilyToPrefixMap.containsKey(node.getColumnFamily())) { + if (!columnFamiliesToLookup.contains(node.getColumnFamily())) { LOG.debug("SstFile node: {} is for columnFamily: {} while filter map " + "contains columnFamilies: {}.", node.getFileName(), - node.getColumnFamily(), columnFamilyToPrefixMap.keySet()); + node.getColumnFamily(), tablePrefixInfo); return true; } - String keyPrefix = columnFamilyToPrefixMap.get(node.getColumnFamily()); - return !isKeyWithPrefixPresent(keyPrefix, node.getStartKey(), - node.getEndKey()); + String keyPrefix = tablePrefixInfo.getTablePrefix(node.getColumnFamily()); + return !isKeyWithPrefixPresent(keyPrefix, node.getStartKey(), node.getEndKey()); } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index fbdb9ea21989..c59f6aeb491f 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -86,6 +86,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; 
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; @@ -237,35 +238,35 @@ public class TestRocksDBCheckpointDiffer { ) ); - private static Map columnFamilyToPrefixMap1 = - new HashMap() { + private static TablePrefixInfo columnFamilyToPrefixMap1 = + new TablePrefixInfo(new HashMap() { { put("keyTable", "/volume/bucket1/"); // Simply using bucketName instead of ID for the test. put("directoryTable", "/volume/bucket1/"); put("fileTable", "/volume/bucket1/"); } - }; + }); - private static Map columnFamilyToPrefixMap2 = - new HashMap() { + private static TablePrefixInfo columnFamilyToPrefixMap2 = + new TablePrefixInfo(new HashMap() { { put("keyTable", "/volume/bucket2/"); // Simply using bucketName instead of ID for the test. put("directoryTable", "/volume/bucket2/"); put("fileTable", "/volume/bucket2/"); } - }; + }); - private static Map columnFamilyToPrefixMap3 = - new HashMap() { + private static TablePrefixInfo columnFamilyToPrefixMap3 = + new TablePrefixInfo(new HashMap() { { put("keyTable", "/volume/bucket3/"); // Simply using bucketName instead of ID for the test. put("directoryTable", "/volume/bucket3/"); put("fileTable", "/volume/bucket3/"); } - }; + }); private static final int NUM_ROW = 250000; private static final int SNAPSHOT_EVERY_SO_MANY_KEYS = 49999; @@ -513,7 +514,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { DifferSnapshotInfo snapshotInfo4 = new DifferSnapshotInfo( "/path/to/dbcp4", UUID.randomUUID(), 18000L, null, Mockito.mock(ManagedRocksDB.class)); - Map prefixMap = ImmutableMap.of("col1", "c", "col2", "d"); + TablePrefixInfo prefixMap = new TablePrefixInfo(ImmutableMap.of("col1", "c", "col2", "d")); DifferSnapshotInfo snapshotInfo5 = new DifferSnapshotInfo( "/path/to/dbcp2", UUID.randomUUID(), 0L, prefixMap, Mockito.mock(ManagedRocksDB.class)); DifferSnapshotInfo snapshotInfo6 = new DifferSnapshotInfo( @@ -740,92 +741,102 @@ public void testGetSSTDiffListWithoutDB(String description, Set expectedSSTDiffFiles, boolean expectingException, Map metaDataMap) { - try (MockedStatic mockedRocksdiffUtil = Mockito.mockStatic(RocksDiffUtils.class, - Mockito.CALLS_REAL_METHODS)) { - mockedRocksdiffUtil.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); - boolean exceptionThrown = false; - if (compactionLog != null) { - // Construct DAG from compaction log input - Arrays.stream(compactionLog.split("\n")).forEach( - rocksDBCheckpointDiffer::processCompactionLogLine); - } else if (compactionLogEntries != null) { - compactionLogEntries.forEach(entry -> - rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); + + boolean exceptionThrown = false; + if (compactionLog != null) { + // Construct DAG from compaction log input + Arrays.stream(compactionLog.split("\n")).forEach( + rocksDBCheckpointDiffer::processCompactionLogLine); + } else if (compactionLogEntries != null) { + compactionLogEntries.forEach(entry -> + rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); + } else { + throw new IllegalArgumentException("One of compactionLog and " + + "compactionLogEntries should be non-null."); + } + rocksDBCheckpointDiffer.loadAllCompactionLogs(); + + Set actualSameSstFiles = new HashSet<>(); + Set actualDiffSstFiles = new HashSet<>(); + + try { + rocksDBCheckpointDiffer.internalGetSSTDiffList( + srcSnapshot, + destSnapshot, + srcSnapshotSstFiles, + destSnapshotSstFiles, + actualSameSstFiles, + actualDiffSstFiles); + } catch (RuntimeException rtEx) { + if (!expectingException) { + fail("Unexpected exception thrown in 
test."); } else { - throw new IllegalArgumentException("One of compactionLog and " + - "compactionLogEntries should be non-null."); + exceptionThrown = true; } - rocksDBCheckpointDiffer.loadAllCompactionLogs(); + } - Set actualSameSstFiles = new HashSet<>(); - Set actualDiffSstFiles = new HashSet<>(); + if (expectingException && !exceptionThrown) { + fail("Expecting exception but none thrown."); + } + // Check same and different SST files result + assertEquals(expectedSameSstFiles, actualSameSstFiles); + assertEquals(expectedDiffSstFiles, actualDiffSstFiles); + try (MockedStatic mockedHandler = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS)) { + RocksDB rocksDB = Mockito.mock(RocksDB.class); + Mockito.when(rocksDB.getName()).thenReturn("dummy"); + Mockito.when(srcSnapshot.getRocksDB().get()).thenReturn(rocksDB); + Mockito.when(destSnapshot.getRocksDB().get()).thenReturn(rocksDB); + Mockito.when(srcSnapshot.getRocksDB().getLiveMetadataForSSTFiles()) + .thenAnswer(invocation -> srcSnapshotSstFiles.stream().filter(metaDataMap::containsKey).map(file -> { + LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); + String[] metaData = metaDataMap.get(file); + Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + file + SST_FILE_EXTENSION); + Mockito.when(liveFileMetaData.smallestKey()).thenReturn(metaData[0].getBytes(UTF_8)); + Mockito.when(liveFileMetaData.largestKey()).thenReturn(metaData[1].getBytes(UTF_8)); + Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(metaData[2].getBytes(UTF_8)); + return liveFileMetaData; + }).collect(Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), + Function.identity()))); + Set tablesToLookup; + String dummyTable; + if (srcSnapshot.getTablePrefixes() != null) { + tablesToLookup = srcSnapshot.getTablePrefixes().getTableNames(); + dummyTable = tablesToLookup.stream().findAny().get(); + } else { + tablesToLookup = mock(Set.class); + when(tablesToLookup.contains(anyString())).thenReturn(true); + dummyTable = "dummy"; + } + mockedHandler.when(() -> RdbUtil.getLiveSSTFilesForCFs(any(), any())) + .thenAnswer(i -> { + Set sstFiles = i.getArgument(0).equals(srcSnapshot.getRocksDB()) ? 
srcSnapshotSstFiles + : destSnapshotSstFiles; + return sstFiles.stream().map(fileName -> { + LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); + when(liveFileMetaData.fileName()).thenReturn("/" + fileName + SST_FILE_EXTENSION); + when(liveFileMetaData.columnFamilyName()).thenReturn(dummyTable.getBytes(UTF_8)); + return liveFileMetaData; + }).collect(Collectors.toList()); + }); try { - rocksDBCheckpointDiffer.internalGetSSTDiffList( - srcSnapshot, - destSnapshot, - srcSnapshotSstFiles, - destSnapshotSstFiles, - actualSameSstFiles, - actualDiffSstFiles); + Assertions.assertEquals(Optional.ofNullable(expectedSSTDiffFiles) + .map(files -> files.stream().sorted().collect(Collectors.toList())).orElse(null), + rocksDBCheckpointDiffer.getSSTDiffList(srcSnapshot, destSnapshot, tablesToLookup) + .map(i -> i.stream().sorted().collect(Collectors.toList())).orElse(null)); } catch (RuntimeException rtEx) { if (!expectingException) { + rtEx.printStackTrace(); fail("Unexpected exception thrown in test."); } else { exceptionThrown = true; } } - - if (expectingException && !exceptionThrown) { - fail("Expecting exception but none thrown."); - } - - // Check same and different SST files result - assertEquals(expectedSameSstFiles, actualSameSstFiles); - assertEquals(expectedDiffSstFiles, actualDiffSstFiles); - try (MockedStatic mockedHandler = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS)) { - RocksDB rocksDB = Mockito.mock(RocksDB.class); - Mockito.when(rocksDB.getName()).thenReturn("dummy"); - Mockito.when(srcSnapshot.getRocksDB().get()).thenReturn(rocksDB); - Mockito.when(destSnapshot.getRocksDB().get()).thenReturn(rocksDB); - Mockito.when(srcSnapshot.getRocksDB().getLiveMetadataForSSTFiles()) - .thenAnswer(invocation -> srcSnapshotSstFiles.stream().filter(metaDataMap::containsKey).map(file -> { - LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); - String[] metaData = metaDataMap.get(file); - Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + file + SST_FILE_EXTENSION); - Mockito.when(liveFileMetaData.smallestKey()).thenReturn(metaData[0].getBytes(UTF_8)); - Mockito.when(liveFileMetaData.largestKey()).thenReturn(metaData[1].getBytes(UTF_8)); - Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(metaData[2].getBytes(UTF_8)); - return liveFileMetaData; - }).collect(Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), - Function.identity()))); - mockedHandler.when(() -> RdbUtil.getLiveSSTFilesForCFs(any(), any())) - .thenAnswer(i -> { - Set sstFiles = i.getArgument(0).equals(srcSnapshot.getRocksDB()) ? 
srcSnapshotSstFiles - : destSnapshotSstFiles; - return sstFiles.stream().map(fileName -> { - LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); - Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + fileName + SST_FILE_EXTENSION); - return liveFileMetaData; - }).collect(Collectors.toList()); - }); - try { - Assertions.assertEquals(Optional.ofNullable(expectedSSTDiffFiles) - .map(files -> files.stream().sorted().collect(Collectors.toList())).orElse(null), - rocksDBCheckpointDiffer.getSSTDiffList(srcSnapshot, destSnapshot) - .map(i -> i.stream().sorted().collect(Collectors.toList())).orElse(null)); - } catch (RuntimeException rtEx) { - if (!expectingException) { - fail("Unexpected exception thrown in test."); - } else { - exceptionThrown = true; - } - } - } - if (expectingException && !exceptionThrown) { - fail("Expecting exception but none thrown."); - } } + if (expectingException && !exceptionThrown) { + fail("Expecting exception but none thrown."); + } + } /** @@ -909,13 +920,41 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ) assertEquals(snapshots.size(), expectedDifferResult.size()); int index = 0; + List expectedDiffFiles = new ArrayList<>(); for (DifferSnapshotInfo snap : snapshots) { - // Returns a list of SST files to be fed into RocksDiff - List sstDiffList = differ.getSSTDiffList(src, snap).orElse(Collections.emptyList()); - LOG.info("SST diff list from '{}' to '{}': {}", - src.getDbPath(), snap.getDbPath(), sstDiffList); + // Returns a list of SST files to be fed into RocksCheckpointDiffer Dag. + List tablesToTrack = new ArrayList<>(COLUMN_FAMILIES_TO_TRACK_IN_DAG); + // Add some invalid index. + tablesToTrack.add("compactionLogTable"); + Set tableToLookUp = new HashSet<>(); + for (int i = 0; i < Math.pow(2, tablesToTrack.size()); i++) { + tableToLookUp.clear(); + expectedDiffFiles.clear(); + int mask = i; + while (mask != 0) { + int firstSetBitIndex = Integer.numberOfTrailingZeros(mask); + tableToLookUp.add(tablesToTrack.get(firstSetBitIndex)); + mask &= mask - 1; + } + for (String diffFile : expectedDifferResult.get(index)) { + String columnFamily; + if (rocksDBCheckpointDiffer.getCompactionNodeMap().containsKey(diffFile)) { + columnFamily = rocksDBCheckpointDiffer.getCompactionNodeMap().get(diffFile).getColumnFamily(); + } else { + columnFamily = bytes2String(src.getRocksDB().getLiveMetadataForSSTFiles().get(diffFile).columnFamilyName()); + } + if (columnFamily == null || tableToLookUp.contains(columnFamily)) { + expectedDiffFiles.add(diffFile); + } + } + List sstDiffList = differ.getSSTDiffList(src, snap, tableToLookUp).orElse(Collections.emptyList()); + LOG.info("SST diff list from '{}' to '{}': {} tables: {}", + src.getDbPath(), snap.getDbPath(), sstDiffList, tableToLookUp); + + assertEquals(expectedDiffFiles, sstDiffList); + + } - assertEquals(expectedDifferResult.get(index), sstDiffList); ++index; } } @@ -1573,7 +1612,7 @@ public void testGetSSTDiffListWithoutDB2( Set destSnapshotSstFiles, Set expectedSameSstFiles, Set expectedDiffSstFiles, - Map columnFamilyToPrefixMap + TablePrefixInfo columnFamilyPrefixInfo ) { compactionLogEntryList.forEach(entry -> rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); @@ -1583,9 +1622,9 @@ public void testGetSSTDiffListWithoutDB2( // Snapshot is used for logging purpose and short-circuiting traversal. // Using gen 0 for this test. 
DifferSnapshotInfo mockedSourceSnapshot = new DifferSnapshotInfo( - "/path/to/dbcp1", UUID.randomUUID(), 0L, columnFamilyToPrefixMap, null); + "/path/to/dbcp1", UUID.randomUUID(), 0L, columnFamilyPrefixInfo, null); DifferSnapshotInfo mockedDestinationSnapshot = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 0L, columnFamilyToPrefixMap, null); + "/path/to/dbcp2", UUID.randomUUID(), 0L, columnFamilyPrefixInfo, null); Set actualSameSstFiles = new HashSet<>(); Set actualDiffSstFiles = new HashSet<>(); @@ -1621,7 +1660,7 @@ private static Stream shouldSkipNodeCases() { @ParameterizedTest() @MethodSource("shouldSkipNodeCases") - public void testShouldSkipNode(Map columnFamilyToPrefixMap, + public void testShouldSkipNode(TablePrefixInfo tablePrefixInfo, List expectedResponse) { compactionLogEntryList.forEach(entry -> rocksDBCheckpointDiffer.addToCompactionLogTable(entry)); @@ -1632,8 +1671,7 @@ public void testShouldSkipNode(Map columnFamilyToPrefixMap, .getCompactionNodeMap().values().stream() .sorted(Comparator.comparing(CompactionNode::getFileName)) .map(node -> - RocksDiffUtils.shouldSkipNode(node, - columnFamilyToPrefixMap)) + RocksDiffUtils.shouldSkipNode(node, tablePrefixInfo, tablePrefixInfo.getTableNames())) .collect(Collectors.toList()); assertEquals(expectedResponse, actualResponse); @@ -1646,7 +1684,7 @@ private static Stream shouldSkipNodeEdgeCases() { CompactionNode nullEndKeyNode = new CompactionNode("fileName", 100, "startKey", null, "columnFamily"); return Stream.of( - Arguments.of(node, Collections.emptyMap(), false), + Arguments.of(node, new TablePrefixInfo(Collections.emptyMap()), false), Arguments.of(node, columnFamilyToPrefixMap1, true), Arguments.of(nullColumnFamilyNode, columnFamilyToPrefixMap1, false), Arguments.of(nullStartKeyNode, columnFamilyToPrefixMap1, false), @@ -1657,7 +1695,7 @@ private static Stream shouldSkipNodeEdgeCases() { @MethodSource("shouldSkipNodeEdgeCases") public void testShouldSkipNodeEdgeCase( CompactionNode node, - Map columnFamilyToPrefixMap, + TablePrefixInfo columnFamilyPrefixInfo, boolean expectedResponse ) { compactionLogEntryList.forEach(entry -> @@ -1666,7 +1704,7 @@ public void testShouldSkipNodeEdgeCase( rocksDBCheckpointDiffer.loadAllCompactionLogs(); assertEquals(expectedResponse, RocksDiffUtils.shouldSkipNode(node, - columnFamilyToPrefixMap)); + columnFamilyPrefixInfo, columnFamilyPrefixInfo.getTableNames())); } private void createKeys(ColumnFamilyHandle cfh, diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java index 324c29015e12..a44baf1905f3 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java @@ -17,20 +17,24 @@ package org.apache.ozone.rocksdiff; +import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.params.provider.Arguments.arguments; -import static org.mockito.ArgumentMatchers.anyString; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; 
+import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.assertj.core.util.Sets; import org.junit.jupiter.api.Assertions; @@ -38,7 +42,6 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.MockedStatic; import org.mockito.Mockito; import org.rocksdb.LiveFileMetaData; import org.rocksdb.RocksDB; @@ -95,23 +98,35 @@ public void testFilterRelevantSstFilesWithPreExistingCompactionInfo(String valid String validSSTFileEndRange, String invalidSSTFileStartRange, String invalidSSTFileEndRange) { - try (MockedStatic mockedHandler = Mockito.mockStatic(RocksDiffUtils.class, - Mockito.CALLS_REAL_METHODS)) { - mockedHandler.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); - String validSstFile = "filePath/validSSTFile.sst"; - String invalidSstFile = "filePath/invalidSSTFile.sst"; - String untrackedSstFile = "filePath/untrackedSSTFile.sst"; - String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + - validSSTFileStartRange.charAt(0)) / 2)); - Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); - RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix), - ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0, validSSTFileStartRange, + String validSstFile = "filePath/validSSTFile.sst"; + String invalidSstFile = "filePath/invalidSSTFile.sst"; + String untrackedSstFile = "filePath/untrackedSSTFile.sst"; + String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + + validSSTFileStartRange.charAt(0)) / 2)); + Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); + Set inputSstFiles = new HashSet<>(); + List> tablesToLookupSet = Arrays.asList(ImmutableSet.of(validSSTColumnFamilyName), + ImmutableSet.of(invalidColumnFamilyName), ImmutableSet.of(validSSTColumnFamilyName, invalidColumnFamilyName), + Collections.emptySet()); + for (Set tablesToLookup : tablesToLookupSet) { + inputSstFiles.clear(); + inputSstFiles.addAll(sstFile); + RocksDiffUtils.filterRelevantSstFiles(inputSstFiles, + new TablePrefixInfo( + new HashMap() {{ + put(invalidColumnFamilyName, getLexicographicallyHigherString(invalidSSTFileEndRange)); + put(validSSTColumnFamilyName, expectedPrefix); + }}), ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0, validSSTFileStartRange, validSSTFileEndRange, validSSTColumnFamilyName), "invalidSSTFile", - new CompactionNode(invalidSstFile, 0, invalidSSTFileStartRange, - invalidSSTFileEndRange, invalidColumnFamilyName))); - Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); + new CompactionNode(invalidSstFile, 0, invalidSSTFileStartRange, + invalidSSTFileEndRange, invalidColumnFamilyName)), tablesToLookup); + if (tablesToLookup.contains(validSSTColumnFamilyName)) { + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), inputSstFiles, + "Failed for " + tablesToLookup); + } else { + Assertions.assertEquals(Sets.newTreeSet(untrackedSstFile), inputSstFiles, "Failed for " + tablesToLookup); + } } - } private LiveFileMetaData getMockedLiveFileMetadata(String 
columnFamilyName, String startRange, @@ -133,44 +148,39 @@ public void testFilterRelevantSstFilesFromDB(String validSSTColumnFamilyName, String validSSTFileEndRange, String invalidSSTFileStartRange, String invalidSSTFileEndRange) { - try (MockedStatic mockedHandler = Mockito.mockStatic(RocksDiffUtils.class, - Mockito.CALLS_REAL_METHODS)) { - mockedHandler.when(() -> RocksDiffUtils.constructBucketKey(anyString())).thenAnswer(i -> i.getArgument(0)); - for (int numberOfDBs = 1; numberOfDBs < 10; numberOfDBs++) { - String validSstFile = "filePath/validSSTFile.sst"; - String invalidSstFile = "filePath/invalidSSTFile.sst"; - String untrackedSstFile = "filePath/untrackedSSTFile.sst"; - int expectedDBKeyIndex = numberOfDBs / 2; - ManagedRocksDB[] rocksDBs = - IntStream.range(0, numberOfDBs).mapToObj(i -> Mockito.mock(ManagedRocksDB.class)) - .collect(Collectors.toList()).toArray(new ManagedRocksDB[numberOfDBs]); - for (int i = 0; i < numberOfDBs; i++) { - ManagedRocksDB managedRocksDB = rocksDBs[i]; - RocksDB mockedRocksDB = Mockito.mock(RocksDB.class); - Mockito.when(managedRocksDB.get()).thenReturn(mockedRocksDB); - if (i == expectedDBKeyIndex) { - LiveFileMetaData validLiveFileMetaData = getMockedLiveFileMetadata(validSSTColumnFamilyName, - validSSTFileStartRange, validSSTFileEndRange, "validSSTFile"); - LiveFileMetaData invalidLiveFileMetaData = getMockedLiveFileMetadata(invalidColumnFamilyName, - invalidSSTFileStartRange, invalidSSTFileEndRange, "invalidSSTFile"); - List liveFileMetaDatas = Arrays.asList(validLiveFileMetaData, invalidLiveFileMetaData); - Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDatas); - } else { - Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(Collections.emptyList()); - } - Mockito.when(managedRocksDB.getLiveMetadataForSSTFiles()) - .thenAnswer(invocation -> ManagedRocksDB.getLiveMetadataForSSTFiles(mockedRocksDB)); + for (int numberOfDBs = 1; numberOfDBs < 10; numberOfDBs++) { + String validSstFile = "filePath/validSSTFile.sst"; + String invalidSstFile = "filePath/invalidSSTFile.sst"; + String untrackedSstFile = "filePath/untrackedSSTFile.sst"; + int expectedDBKeyIndex = numberOfDBs / 2; + ManagedRocksDB[] rocksDBs = + IntStream.range(0, numberOfDBs).mapToObj(i -> Mockito.mock(ManagedRocksDB.class)) + .collect(Collectors.toList()).toArray(new ManagedRocksDB[numberOfDBs]); + for (int i = 0; i < numberOfDBs; i++) { + ManagedRocksDB managedRocksDB = rocksDBs[i]; + RocksDB mockedRocksDB = Mockito.mock(RocksDB.class); + Mockito.when(managedRocksDB.get()).thenReturn(mockedRocksDB); + if (i == expectedDBKeyIndex) { + LiveFileMetaData validLiveFileMetaData = getMockedLiveFileMetadata(validSSTColumnFamilyName, + validSSTFileStartRange, validSSTFileEndRange, "validSSTFile"); + LiveFileMetaData invalidLiveFileMetaData = getMockedLiveFileMetadata(invalidColumnFamilyName, + invalidSSTFileStartRange, invalidSSTFileEndRange, "invalidSSTFile"); + List liveFileMetaDatas = Arrays.asList(validLiveFileMetaData, invalidLiveFileMetaData); + Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDatas); + } else { + Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(Collections.emptyList()); } - - String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + - validSSTFileStartRange.charAt(0)) / 2)); - Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); - RocksDiffUtils.filterRelevantSstFiles(sstFile, ImmutableMap.of(validSSTColumnFamilyName, expectedPrefix), - 
Collections.emptyMap(), rocksDBs); - Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); + Mockito.when(managedRocksDB.getLiveMetadataForSSTFiles()) + .thenAnswer(invocation -> ManagedRocksDB.getLiveMetadataForSSTFiles(mockedRocksDB)); } + String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + + validSSTFileStartRange.charAt(0)) / 2)); + Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); + RocksDiffUtils.filterRelevantSstFiles(sstFile, new TablePrefixInfo(ImmutableMap.of(validSSTColumnFamilyName, + expectedPrefix)), Collections.emptyMap(), + ImmutableSet.of(validSSTColumnFamilyName), rocksDBs); + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); } - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index 5429dc0f4a12..bd2f17f1deb7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -22,7 +22,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DB_COMPACTION_SST_BACKUP_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DIR; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; +import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -154,8 +154,7 @@ private DifferSnapshotInfo getDifferSnapshotInfo( // persisted at the time of snapshot creation, as the snapshot generation return new DifferSnapshotInfo(checkpointPath, snapshotInfo.getSnapshotId(), snapshotInfo.getDbTxSequenceNumber(), - getColumnFamilyToKeyPrefixMap(omMetadataManager, volumeName, - bucketName), + omMetadataManager.getTableBucketPrefix(volumeName, bucketName), snapshotDB); } @@ -229,7 +228,8 @@ public void testDAGReconstruction() final File checkpointSnap2 = new File(snap2.getDbPath()); GenericTestUtils.waitFor(checkpointSnap2::exists, 2000, 20000); - List sstDiffList21 = differ.getSSTDiffList(snap2, snap1).orElse(Collections.emptyList()); + List sstDiffList21 = differ.getSSTDiffList(snap2, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); LOG.debug("Got diff list: {}", sstDiffList21); // Delete 1000 keys, take a 3rd snapshot, and do another diff @@ -248,13 +248,16 @@ public void testDAGReconstruction() final File checkpointSnap3 = new File(snap3.getDbPath()); GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); - List sstDiffList32 = differ.getSSTDiffList(snap3, snap2).orElse(Collections.emptyList()); + List sstDiffList32 = differ.getSSTDiffList(snap3, snap2, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); // snap3-snap1 diff result is a combination of snap3-snap2 and snap2-snap1 - List sstDiffList31 = differ.getSSTDiffList(snap3, snap1).orElse(Collections.emptyList()); + List sstDiffList31 = differ.getSSTDiffList(snap3, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); // Same snapshot. 
Result should be empty list - List sstDiffList22 = differ.getSSTDiffList(snap2, snap2).orElse(Collections.emptyList()); + List sstDiffList22 = differ.getSSTDiffList(snap2, snap2, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); assertThat(sstDiffList22).isEmpty(); snapDB1.close(); snapDB2.close(); @@ -280,13 +283,16 @@ public void testDAGReconstruction() volumeName, bucketName, "snap3", ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1).orElse(Collections.emptyList()); + List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); assertEquals(sstDiffList21, sstDiffList21Run2); - List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2).orElse(Collections.emptyList()); + List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); assertEquals(sstDiffList32, sstDiffList32Run2); - List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1).orElse(Collections.emptyList()); + List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) + .orElse(Collections.emptyList()); assertEquals(sstDiffList31, sstDiffList31Run2); snapDB1.close(); snapDB2.close(); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 7a0872277341..baac362da741 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.common.BlockGroup; @@ -686,6 +687,15 @@ String getMultipartKey(long volumeId, long bucketId, boolean containsIncompleteMPUs(String volume, String bucket) throws IOException; + TablePrefixInfo getTableBucketPrefix(String volume, String bucket) throws IOException; + + /** + * Computes the bucket prefix for a table. + * @return "" if the table's keys are not bucket-prefixed. + * @throws IOException + */ + String getTableBucketPrefix(String tableName, String volume, String bucket) throws IOException; + /** * Represents a unique identifier for a specific bucket within a volume. * @@ -724,4 +734,5 @@ public int hashCode() { return Objects.hash(volumeId, bucketId); } } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index e458fa73236a..b9153eeef69b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -808,9 +808,10 @@ public PendingKeysDeletion getPendingDeletionKeys( int notReclaimableKeyCount = 0; // Bucket prefix would be empty if volume is empty i.e. either null or "".
- Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + Table deletedTable = metadataManager.getDeletedTable(); + Optional bucketPrefix = getBucketPrefix(volume, bucket, deletedTable); try (TableIterator> - delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { + delKeyIter = deletedTable.iterator(bucketPrefix.orElse(""))) { /* Seeking to the start key if it not null. The next key picked up would be ensured to start with the bucket prefix, {@link org.apache.hadoop.hdds.utils.db.Table#iterator(bucketPrefix)} would ensure this. @@ -887,7 +888,7 @@ private List> getTableEntries(String startKey, return entries; } - private Optional getBucketPrefix(String volumeName, String bucketName, boolean isFSO) throws IOException { + private Optional getBucketPrefix(String volumeName, String bucketName, Table table) throws IOException { // Bucket prefix would be empty if both volume & bucket is empty i.e. either null or "". if (StringUtils.isEmpty(volumeName) && StringUtils.isEmpty(bucketName)) { return Optional.empty(); @@ -895,17 +896,17 @@ private Optional getBucketPrefix(String volumeName, String bucketName, b throw new IOException("One of volume : " + volumeName + ", bucket: " + bucketName + " is empty." + " Either both should be empty or none of the arguments should be empty"); } - return isFSO ? Optional.of(metadataManager.getBucketKeyPrefixFSO(volumeName, bucketName)) : - Optional.of(metadataManager.getBucketKeyPrefix(volumeName, bucketName)); + return Optional.of(metadataManager.getTableBucketPrefix(table.getName(), volumeName, bucketName)); } @Override public List> getRenamesKeyEntries( String volume, String bucket, String startKey, CheckedFunction, Boolean, IOException> filter, int size) throws IOException { - Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + Table snapshotRenamedTable = metadataManager.getSnapshotRenamedTable(); + Optional bucketPrefix = getBucketPrefix(volume, bucket, snapshotRenamedTable); try (TableIterator> - renamedKeyIter = metadataManager.getSnapshotRenamedTable().iterator(bucketPrefix.orElse(""))) { + renamedKeyIter = snapshotRenamedTable.iterator(bucketPrefix.orElse(""))) { return getTableEntries(startKey, renamedKeyIter, Function.identity(), filter, size); } } @@ -953,9 +954,10 @@ public List>> getDeletedKeyEntries( String volume, String bucket, String startKey, CheckedFunction, Boolean, IOException> filter, int size) throws IOException { - Optional bucketPrefix = getBucketPrefix(volume, bucket, false); + Table deletedTable = metadataManager.getDeletedTable(); + Optional bucketPrefix = getBucketPrefix(volume, bucket, deletedTable); try (TableIterator> - delKeyIter = metadataManager.getDeletedTable().iterator(bucketPrefix.orElse(""))) { + delKeyIter = deletedTable.iterator(bucketPrefix.orElse(""))) { return getTableEntries(startKey, delKeyIter, RepeatedOmKeyInfo::cloneOmKeyInfoList, filter, size); } } @@ -2259,8 +2261,9 @@ private void slimLocationVersion(OmKeyInfo... 
keyInfos) { @Override public TableIterator> getDeletedDirEntries( String volume, String bucket) throws IOException { - Optional bucketPrefix = getBucketPrefix(volume, bucket, true); - return metadataManager.getDeletedDirTable().iterator(bucketPrefix.orElse("")); + Table deletedDirTable = metadataManager.getDeletedDirTable(); + Optional bucketPrefix = getBucketPrefix(volume, bucket, deletedDirTable); + return deletedDirTable.iterator(bucketPrefix.orElse("")); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index c7b071a6e8d9..ab2287c5a378 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -28,6 +28,18 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.BUCKET_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_DIR_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.MULTIPART_INFO_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.OPEN_FILE_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.OPEN_KEY_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_INFO_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_RENAMED_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.VOLUME_TABLE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; @@ -72,6 +84,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -1846,6 +1859,58 @@ public boolean containsIncompleteMPUs(String volume, String bucket) return false; } + // NOTE: Update both getTableBucketPrefix(volume, bucket) & getTableBucketPrefix(tableName, volume, bucket) + // simultaneously. Implemented duplicate functions to avoid recomputing the bucket key prefix for every + // table. 
+ @Override + public TablePrefixInfo getTableBucketPrefix(String volume, String bucket) throws IOException { + String keyPrefix = getBucketKeyPrefix(volume, bucket); + String keyPrefixFso = getBucketKeyPrefixFSO(volume, bucket); + // Initial capacity 12 with load factor 1.0, since exactly 12 tables get a prefix; avoids over-sizing the HashMap. + Map tablePrefixMap = new HashMap<>(12, 1.0f); + + tablePrefixMap.put(VOLUME_TABLE, getVolumeKey(volume)); + tablePrefixMap.put(BUCKET_TABLE, getBucketKey(volume, bucket)); + + tablePrefixMap.put(KEY_TABLE, keyPrefix); + tablePrefixMap.put(DELETED_TABLE, keyPrefix); + tablePrefixMap.put(SNAPSHOT_RENAMED_TABLE, keyPrefix); + tablePrefixMap.put(OPEN_KEY_TABLE, keyPrefix); + tablePrefixMap.put(MULTIPART_INFO_TABLE, keyPrefix); + tablePrefixMap.put(SNAPSHOT_INFO_TABLE, keyPrefix); + + tablePrefixMap.put(FILE_TABLE, keyPrefixFso); + tablePrefixMap.put(DIRECTORY_TABLE, keyPrefixFso); + tablePrefixMap.put(DELETED_DIR_TABLE, keyPrefixFso); + tablePrefixMap.put(OPEN_FILE_TABLE, keyPrefixFso); + + return new TablePrefixInfo(tablePrefixMap); + } + + @Override + public String getTableBucketPrefix(String tableName, String volume, String bucket) throws IOException { + switch (tableName) { + case VOLUME_TABLE: + return getVolumeKey(volume); + case BUCKET_TABLE: + return getBucketKey(volume, bucket); + case KEY_TABLE: + case DELETED_TABLE: + case SNAPSHOT_RENAMED_TABLE: + case OPEN_KEY_TABLE: + case MULTIPART_INFO_TABLE: + case SNAPSHOT_INFO_TABLE: + return getBucketKeyPrefix(volume, bucket); + case FILE_TABLE: + case DIRECTORY_TABLE: + case DELETED_DIR_TABLE: + case OPEN_FILE_TABLE: + return getBucketKeyPrefixFSO(volume, bucket); + default: + return ""; + } + } + @Override public void close() throws IOException { stop(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 7b9beb80cf6f..9f645a5c72a2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -61,6 +61,7 @@ import com.google.common.base.Preconditions; import com.google.common.cache.CacheLoader; import com.google.common.cache.RemovalListener; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import jakarta.annotation.Nonnull; import java.io.File; @@ -528,14 +529,11 @@ public static DBCheckpoint createOmSnapshotCheckpoint( // Clean up active DB's deletedTable right after checkpoint is taken, // Snapshot create is processed as a single transaction and // transactions are flushed sequentially so, no need to take any lock as of now. 
- deleteKeysFromDelKeyTableInSnapshotScope(omMetadataManager, - snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation); - // Clean up deletedDirectoryTable as well - deleteKeysFromDelDirTableInSnapshotScope(omMetadataManager, - snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation); - // Remove entries from snapshotRenamedTable - deleteKeysFromSnapRenamedTableInSnapshotScope(omMetadataManager, - snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation); + for (Table table : ImmutableList.of(omMetadataManager.getDeletedTable(), + omMetadataManager.getDeletedDirTable(), omMetadataManager.getSnapshotRenamedTable())) { + deleteKeysFromTableWithBucketPrefix(omMetadataManager, table, + snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation); + } if (snapshotDirExist) { LOG.info("Checkpoint: {} for snapshot {} already exists.", @@ -552,51 +550,21 @@ public static DBCheckpoint createOmSnapshotCheckpoint( /** * Helper method to perform batch delete range operation on a given key prefix. - * @param prefix prefix of keys to be deleted + * @param metadataManager metadataManager instance * @param table table from which keys are to be deleted + * @param volume volume corresponding to the bucket + * @param bucket bucket whose keys are to be deleted from the table * @param batchOperation batch operation */ - private static void deleteKeysFromTableWithPrefix( - String prefix, Table table, BatchOperation batchOperation) throws IOException { + private static void deleteKeysFromTableWithBucketPrefix(OMMetadataManager metadataManager, + Table table, String volume, String bucket, BatchOperation batchOperation) throws IOException { + String prefix = metadataManager.getTableBucketPrefix(table.getName(), volume, bucket); String endKey = getLexicographicallyHigherString(prefix); LOG.debug("Deleting key range from {} - startKey: {}, endKey: {}", table.getName(), prefix, endKey); table.deleteRangeWithBatch(batchOperation, prefix, endKey); } - /** - * Helper method to delete DB keys in the snapshot scope (bucket) - * from active DB's deletedDirectoryTable. - * @param omMetadataManager OMMetadataManager instance - * @param volumeName volume name - * @param bucketName bucket name - * @param batchOperation batch operation - */ - private static void deleteKeysFromSnapRenamedTableInSnapshotScope( - OMMetadataManager omMetadataManager, String volumeName, - String bucketName, BatchOperation batchOperation) throws IOException { - - final String keyPrefix = omMetadataManager.getBucketKeyPrefix(volumeName, bucketName); - deleteKeysFromTableWithPrefix(keyPrefix, omMetadataManager.getSnapshotRenamedTable(), batchOperation); - } - - /** - * Helper method to delete DB keys in the snapshot scope (bucket) - * from active DB's deletedDirectoryTable. 
- * @param omMetadataManager OMMetadataManager instance - * @param volumeName volume name - * @param bucketName bucket name - * @param batchOperation batch operation - */ - private static void deleteKeysFromDelDirTableInSnapshotScope( - OMMetadataManager omMetadataManager, String volumeName, - String bucketName, BatchOperation batchOperation) throws IOException { - - // Range delete start key (inclusive) - final String keyPrefix = omMetadataManager.getBucketKeyPrefixFSO(volumeName, bucketName); - deleteKeysFromTableWithPrefix(keyPrefix, omMetadataManager.getDeletedDirTable(), batchOperation); - } - @VisibleForTesting public SnapshotDiffManager getSnapshotDiffManager() { return snapshotDiffManager; @@ -607,22 +575,6 @@ public SnapshotDiffCleanupService getSnapshotDiffCleanupService() { return snapshotDiffCleanupService; } - /** - * Helper method to delete DB keys in the snapshot scope (bucket) - * from active DB's deletedTable. - * @param omMetadataManager OMMetadataManager instance - * @param volumeName volume name - * @param bucketName bucket name - * @param batchOperation batch operation - */ - private static void deleteKeysFromDelKeyTableInSnapshotScope( - OMMetadataManager omMetadataManager, String volumeName, - String bucketName, BatchOperation batchOperation) throws IOException { - // Range delete prefix (inclusive) - final String keyPrefix = omMetadataManager.getBucketKeyPrefix(volumeName, bucketName); - deleteKeysFromTableWithPrefix(keyPrefix, omMetadataManager.getDeletedTable(), batchOperation); - } - /** * Captures the list of SST files for keyTable, fileTable and directoryTable in the DB. * @param store AOS or snapshot DB for not defragged or defragged snapshot respectively. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index 522ea7df6de5..9b2ea0cad68f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -20,14 +20,12 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_SST_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_SST_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -42,6 +40,7 @@ import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -181,11 +180,9 @@ public BackgroundTaskResult call() throws Exception { LOG.debug("Processing snapshot {} to filter relevant SST Files", snapShotTableKey); - - Map columnFamilyNameToPrefixMap = - getColumnFamilyToKeyPrefixMap(ozoneManager.getMetadataManager(), - snapshotInfo.getVolumeName(), - 
snapshotInfo.getBucketName()); + TablePrefixInfo bucketPrefixInfo = + ozoneManager.getMetadataManager().getTableBucketPrefix(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName()); try ( UncheckedAutoCloseableSupplier snapshotMetadataReader = @@ -199,7 +196,7 @@ public BackgroundTaskResult call() throws Exception { RocksDatabase db = rdbStore.getDb(); try (BootstrapStateHandler.Lock lock = getBootstrapStateLock() .lock()) { - db.deleteFilesNotMatchingPrefix(columnFamilyNameToPrefixMap); + db.deleteFilesNotMatchingPrefix(bucketPrefixInfo); } markSSTFilteredFlagForSnapshot(snapshotInfo); snapshotLimit--; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java index 63cc010b790a..fef5dc76c4de 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java @@ -80,22 +80,21 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { UUID fromSnapshotID = fromProtobuf(moveTableKeysRequest.getFromSnapshotID()); SnapshotInfo fromSnapshot = SnapshotUtils.getSnapshotInfo(ozoneManager, snapshotChainManager, fromSnapshotID); - String bucketKeyPrefix = omMetadataManager.getBucketKeyPrefix(fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName()); - String bucketKeyPrefixFSO = omMetadataManager.getBucketKeyPrefixFSO(fromSnapshot.getVolumeName(), - fromSnapshot.getBucketName()); + Set keys = new HashSet<>(); List deletedKeys = new ArrayList<>(moveTableKeysRequest.getDeletedKeysList().size()); //validate deleted key starts with bucket prefix.[///] + String deletedTablePrefix = omMetadataManager.getTableBucketPrefix(omMetadataManager.getDeletedTable().getName(), + fromSnapshot.getVolumeName(), fromSnapshot.getBucketName()); for (SnapshotMoveKeyInfos deletedKey : moveTableKeysRequest.getDeletedKeysList()) { // Filter only deleted keys with at least one keyInfo per key. 
if (!deletedKey.getKeyInfosList().isEmpty()) { deletedKeys.add(deletedKey); - if (!deletedKey.getKey().startsWith(bucketKeyPrefix)) { + if (!deletedKey.getKey().startsWith(deletedTablePrefix)) { OMException ex = new OMException("Deleted Key: " + deletedKey + " doesn't start with prefix " - + bucketKeyPrefix, OMException.ResultCodes.INVALID_KEY_NAME); + + deletedTablePrefix, OMException.ResultCodes.INVALID_KEY_NAME); if (LOG.isDebugEnabled()) { AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.SNAPSHOT_MOVE_TABLE_KEYS, null, ex)); @@ -117,14 +116,17 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } keys.clear(); + String renamedTablePrefix = omMetadataManager.getTableBucketPrefix( + omMetadataManager.getSnapshotRenamedTable().getName(), fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); List renamedKeysList = new ArrayList<>(moveTableKeysRequest.getRenamedKeysList().size()); //validate rename key starts with bucket prefix.[///] for (HddsProtos.KeyValue renamedKey : moveTableKeysRequest.getRenamedKeysList()) { if (renamedKey.hasKey() && renamedKey.hasValue()) { renamedKeysList.add(renamedKey); - if (!renamedKey.getKey().startsWith(bucketKeyPrefix)) { + if (!renamedKey.getKey().startsWith(renamedTablePrefix)) { OMException ex = new OMException("Rename Key: " + renamedKey + " doesn't start with prefix " - + bucketKeyPrefix, OMException.ResultCodes.INVALID_KEY_NAME); + + renamedTablePrefix, OMException.ResultCodes.INVALID_KEY_NAME); if (LOG.isDebugEnabled()) { AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.SNAPSHOT_MOVE_TABLE_KEYS, null, ex)); @@ -147,15 +149,17 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { keys.clear(); // Filter only deleted dirs with only one keyInfo per key. + String deletedDirTablePrefix = omMetadataManager.getTableBucketPrefix( + omMetadataManager.getDeletedDirTable().getName(), fromSnapshot.getVolumeName(), fromSnapshot.getBucketName()); List deletedDirs = new ArrayList<>(moveTableKeysRequest.getDeletedDirsList().size()); //validate deleted key starts with bucket FSO path prefix.[///] for (SnapshotMoveKeyInfos deletedDir : moveTableKeysRequest.getDeletedDirsList()) { // Filter deleted directories with exactly one keyInfo per key. 
if (deletedDir.getKeyInfosList().size() == 1) { deletedDirs.add(deletedDir); - if (!deletedDir.getKey().startsWith(bucketKeyPrefixFSO)) { + if (!deletedDir.getKey().startsWith(deletedDirTablePrefix)) { OMException ex = new OMException("Deleted dir: " + deletedDir + " doesn't start with prefix " + - bucketKeyPrefixFSO, OMException.ResultCodes.INVALID_KEY_NAME); + deletedDirTablePrefix, OMException.ResultCodes.INVALID_KEY_NAME); if (LOG.isDebugEnabled()) { AUDIT.logWriteFailure(ozoneManager.buildAuditMessageForFailure(OMSystemAction.SNAPSHOT_MOVE_TABLE_KEYS, null, ex)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java index 497c7a064b8b..55132f71d5cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java @@ -86,7 +86,7 @@ public static Object getINode(Path file) throws IOException { * @throws IOException if an I/O error occurs */ public static String getFileInodeAndLastModifiedTimeString(Path file) throws IOException { - Object inode = Files.readAttributes(file, BasicFileAttributes.class).fileKey(); + Object inode = getINode(file); FileTime mTime = Files.getLastModifiedTime(file); return String.format("%s-%s", inode, mTime.toMillis()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 21c2b5979a72..e5bc8dcfa91a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.commons.lang3.StringUtils.leftPad; +import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.CREATE; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.DELETE; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.MODIFY; @@ -38,11 +39,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getSnapshotInfo; import static org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage.CANCEL_ALREADY_CANCELLED_JOB; import static org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage.CANCEL_ALREADY_DONE_JOB; @@ -102,6 +100,7 @@ import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; import 
org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; @@ -360,18 +359,17 @@ private DifferSnapshotInfo getDSIFromSI(SnapshotInfo snapshotInfo, snapshotOMMM.getStore().getDbLocation().getPath(); final UUID snapshotId = snapshotInfo.getSnapshotId(); final long dbTxSequenceNumber = snapshotInfo.getDbTxSequenceNumber(); - return new DifferSnapshotInfo( checkpointPath, snapshotId, dbTxSequenceNumber, - getColumnFamilyToKeyPrefixMap(snapshotOMMM, volumeName, bucketName), + snapshotOMMM.getTableBucketPrefix(volumeName, bucketName), ((RDBStore)snapshotOMMM.getStore()).getDb().getManagedRocksDb()); } @VisibleForTesting protected Set getSSTFileListForSnapshot(OmSnapshot snapshot, - List tablesToLookUp) { + Set tablesToLookUp) { return RdbUtil.getSSTFilesForComparison(((RDBStore)snapshot .getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); @@ -379,7 +377,7 @@ protected Set getSSTFileListForSnapshot(OmSnapshot snapshot, @VisibleForTesting protected Map getSSTFileMapForSnapshot(OmSnapshot snapshot, - List tablesToLookUp) throws IOException { + Set tablesToLookUp) throws IOException { return RdbUtil.getSSTFilesWithInodesForComparison(((RDBStore)snapshot .getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); @@ -893,9 +891,7 @@ void generateSnapshotDiffReport(final String jobKey, final BucketLayout bucketLayout = getBucketLayout(volumeName, bucketName, fromSnapshot.getMetadataManager()); - Map tablePrefixes = - getColumnFamilyToKeyPrefixMap(toSnapshot.getMetadataManager(), - volumeName, bucketName); + TablePrefixInfo tablePrefixes = toSnapshot.getMetadataManager().getTableBucketPrefix(volumeName, bucketName); boolean useFullDiff = snapshotForceFullDiff || forceFullDiff; boolean performNonNativeDiff = diffDisableNativeLibs || disableNativeDiff; @@ -964,9 +960,8 @@ void generateSnapshotDiffReport(final String jobKey, if (bucketLayout.isFileSystemOptimized()) { long bucketId = toSnapshot.getMetadataManager() .getBucketId(volumeName, bucketName); - String tablePrefix = getTablePrefix(tablePrefixes, - fromSnapshot.getMetadataManager() - .getDirectoryTable().getName()); + String tablePrefix = tablePrefixes.getTablePrefix(fromSnapshot.getMetadataManager() + .getDirectoryTable().getName()); oldParentIdPathMap.get().putAll(new FSODirectoryPathResolver( tablePrefix, bucketId, fromSnapshot.getMetadataManager().getDirectoryTable()) @@ -1050,7 +1045,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( final OmSnapshot fromSnapshot, final OmSnapshot toSnapshot, final SnapshotInfo fsInfo, final SnapshotInfo tsInfo, final boolean useFullDiff, final boolean skipNativeDiff, - final Map tablePrefixes, + final TablePrefixInfo tablePrefixes, final PersistentMap oldObjIdToKeyMap, final PersistentMap newObjIdToKeyMap, final PersistentMap objectIdToIsDirMap, @@ -1058,7 +1053,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( final Optional> newParentIds, final String diffDir, final String jobKey) throws IOException, RocksDBException { - List tablesToLookUp = Collections.singletonList(fsTable.getName()); + Set tablesToLookUp = Collections.singleton(fsTable.getName()); Set deltaFiles = getDeltaFiles(fromSnapshot, toSnapshot, tablesToLookUp, fsInfo, tsInfo, useFullDiff, tablePrefixes, diffDir, jobKey); @@ -1068,7 
+1063,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( if (skipNativeDiff || !isNativeLibsLoaded) { Set inputFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp); ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(); - RocksDiffUtils.filterRelevantSstFiles(inputFiles, tablePrefixes, fromDB); + RocksDiffUtils.filterRelevantSstFiles(inputFiles, tablePrefixes, tablesToLookUp, fromDB); deltaFiles.addAll(inputFiles); } if (LOG.isDebugEnabled()) { @@ -1090,13 +1085,12 @@ void addToObjectIdMap(Table fsTable, PersistentMap objectIdToIsDirMap, Optional> oldParentIds, Optional> newParentIds, - Map tablePrefixes, String jobKey) throws IOException, RocksDBException { + TablePrefixInfo tablePrefixes, String jobKey) throws IOException, RocksDBException { if (deltaFiles.isEmpty()) { return; } - String tablePrefix = getTablePrefix(tablePrefixes, fsTable.getName()); - boolean isDirectoryTable = - fsTable.getName().equals(DIRECTORY_TABLE); + String tablePrefix = tablePrefixes.getTablePrefix(fsTable.getName()); + boolean isDirectoryTable = fsTable.getName().equals(DIRECTORY_TABLE); SstFileSetReader sstFileReader = new SstFileSetReader(deltaFiles); validateEstimatedKeyChangesAreInLimits(sstFileReader); long totalEstimatedKeysToProcess = sstFileReader.getEstimatedTotalKeys(); @@ -1106,9 +1100,7 @@ void addToObjectIdMap(Table fsTable, double[] checkpoint = new double[1]; checkpoint[0] = stepIncreasePct; if (Strings.isNotEmpty(tablePrefix)) { - char[] upperBoundCharArray = tablePrefix.toCharArray(); - upperBoundCharArray[upperBoundCharArray.length - 1] += 1; - sstFileReaderUpperBound = String.valueOf(upperBoundCharArray); + sstFileReaderUpperBound = getLexicographicallyHigherString(tablePrefix); } try (Stream keysToCheck = nativeRocksToolsLoaded ? sstFileReader.getKeyStreamWithTombstone(sstFileReaderLowerBound, sstFileReaderUpperBound) @@ -1170,11 +1162,11 @@ void addToObjectIdMap(Table fsTable, @SuppressWarnings("checkstyle:ParameterNumber") Set getDeltaFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, - List tablesToLookUp, + Set tablesToLookUp, SnapshotInfo fsInfo, SnapshotInfo tsInfo, boolean useFullDiff, - Map tablePrefixes, + TablePrefixInfo tablePrefixInfo, String diffDir, String jobKey) throws IOException { // TODO: [SNAPSHOT] Refactor the parameter list @@ -1193,7 +1185,7 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, recordActivity(jobKey, SST_FILE_DELTA_DAG_WALK); LOG.debug("Calling RocksDBCheckpointDiffer"); try { - deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, diffDir).map(HashSet::new); + deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, tablesToLookUp, diffDir).map(HashSet::new); } catch (Exception exception) { recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF); LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. 
" + @@ -1214,7 +1206,7 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, ManagedRocksDB toDB = ((RDBStore)toSnapshot.getMetadataManager().getStore()) .getDb().getManagedRocksDb(); Set diffFiles = getDiffFiles(fromSnapshot, toSnapshot, tablesToLookUp); - RocksDiffUtils.filterRelevantSstFiles(diffFiles, tablePrefixes, fromDB, toDB); + RocksDiffUtils.filterRelevantSstFiles(diffFiles, tablePrefixInfo, tablesToLookUp, fromDB, toDB); deltaFiles = Optional.of(diffFiles); } @@ -1223,7 +1215,7 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, toSnapshot.getSnapshotTableKey())); } - private Set getDiffFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, List tablesToLookUp) { + private Set getDiffFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, Set tablesToLookUp) { Set diffFiles; try { Map fromSnapshotFiles = getSSTFileMapForSnapshot(fromSnapshot, tablesToLookUp); @@ -1303,7 +1295,7 @@ long generateDiffReport( final boolean isFSOBucket, final Optional> oldParentIdPathMap, final Optional> newParentIdPathMap, - final Map tablePrefix) { + final TablePrefixInfo tablePrefix) { LOG.info("Starting diff report generation for jobId: {}.", jobId); ColumnFamilyHandle deleteDiffColumnFamily = null; ColumnFamilyHandle renameDiffColumnFamily = null; @@ -1394,8 +1386,7 @@ long generateDiffReport( modifyDiffs.add(codecRegistry.asRawData(entry)); } } else { - String keyPrefix = getTablePrefix(tablePrefix, - (isDirectoryObject ? fsDirTable : fsTable).getName()); + String keyPrefix = tablePrefix.getTablePrefix((isDirectoryObject ? fsDirTable : fsTable).getName()); String oldKey = resolveBucketRelativePath(isFSOBucket, oldParentIdPathMap, oldKeyName, true); String newKey = resolveBucketRelativePath(isFSOBucket, @@ -1658,26 +1649,12 @@ private boolean areKeysEqual(WithObjectID oldKey, WithObjectID newKey) { return false; } - /** - * Get table prefix given a tableName. - */ - private String getTablePrefix(Map tablePrefixes, - String tableName) { - // In case of FSO - either File/Directory table - // the key Prefix would be volumeId/bucketId and - // in case of non-fso - volumeName/bucketName - if (tableName.equals(DIRECTORY_TABLE) || tableName.equals(FILE_TABLE)) { - return tablePrefixes.get(DIRECTORY_TABLE); - } - return tablePrefixes.get(KEY_TABLE); - } - /** * check if the given key is in the bucket specified by tablePrefix map. 
*/ - boolean isKeyInBucket(String key, Map tablePrefixes, + boolean isKeyInBucket(String key, TablePrefixInfo tablePrefixInfo, String tableName) { - return key.startsWith(getTablePrefix(tablePrefixes, tableName)); + return key.startsWith(tablePrefixInfo.getTablePrefix(tableName)); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 63e7e38d518f..5897f4ae8916 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -18,18 +18,13 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TIMEOUT; import java.io.File; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Optional; @@ -216,26 +211,6 @@ public static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainMan return null; } - /** - * Return a map column family to prefix for the keys in the table for - * the given volume and bucket. - * Column families, map is returned for, are keyTable, dirTable and fileTable. - */ - public static Map getColumnFamilyToKeyPrefixMap( - OMMetadataManager omMetadataManager, - String volumeName, - String bucketName - ) throws IOException { - String keyPrefix = omMetadataManager.getBucketKeyPrefix(volumeName, bucketName); - String keyPrefixFso = omMetadataManager.getBucketKeyPrefixFSO(volumeName, bucketName); - - Map columnFamilyToPrefixMap = new HashMap<>(); - columnFamilyToPrefixMap.put(KEY_TABLE, keyPrefix); - columnFamilyToPrefixMap.put(DIRECTORY_TABLE, keyPrefixFso); - columnFamilyToPrefixMap.put(FILE_TABLE, keyPrefixFso); - return columnFamilyToPrefixMap; - } - /** * Returns merged repeatedKeyInfo entry with the existing deleted entry in the table. * @param snapshotMoveKeyInfos keyInfos to be added. 
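A note for readers following the refactor above: the Map-based column-family-to-prefix plumbing is replaced by a TablePrefixInfo value, and callers now also pass an explicit tablesToLookup set through getSSTDiffList, filterRelevantSstFiles and the snapshot-diff paths. The TablePrefixInfo class body is not part of this excerpt; the sketch below only illustrates the shape its call sites imply (a Map-taking constructor, getTablePrefix(tableName), getTableNames()). Field names and the empty-string fallback are assumptions, not the actual org.apache.hadoop.hdds.utils.db.TablePrefixInfo implementation.

import java.util.Collections;
import java.util.Map;
import java.util.Set;

// Hypothetical sketch of TablePrefixInfo as implied by call sites in this patch.
public final class TablePrefixInfo {
  // Table name -> bucket key prefix; "" marks a table without bucket-prefixed keys.
  private final Map<String, String> tablePrefixMap;

  public TablePrefixInfo(Map<String, String> tablePrefixMap) {
    this.tablePrefixMap = Collections.unmodifiableMap(tablePrefixMap);
  }

  // Prefix used to scope iterators and SST filtering to a single bucket.
  public String getTablePrefix(String tableName) {
    return tablePrefixMap.getOrDefault(tableName, "");
  }

  // Tables with a known prefix, usable as a default tablesToLookup set.
  public Set<String> getTableNames() {
    return tablePrefixMap.keySet();
  }
}

Under this shape, call sites in the diff such as new TablePrefixInfo(Collections.emptyMap()) and tablePrefixes.getTablePrefix(fsTable.getName()) type-check, and the "" fallback matches the interface javadoc's contract for tables without bucket-prefixed keys.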
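Similarly, the recurring getLexicographicallyHigherString(prefix) helper, used above both as the deleteRangeWithBatch end key and as the SST reader upper bound, computes an exclusive upper bound for a prefix range; it replaces the hand-rolled upperBoundCharArray increment removed from SnapshotDiffManager. The following is a minimal sketch of that bump-last-character idea, assuming the semantics of the removed code; the real org.apache.hadoop.hdds.StringUtils helper may treat edge cases (empty input, Character.MAX_VALUE) differently.

// Illustration only: smallest string that sorts strictly above every key
// starting with 'prefix', obtained by incrementing the last character.
public final class PrefixBounds {
  static String lexicographicallyHigher(String prefix) {
    char[] chars = prefix.toCharArray();
    chars[chars.length - 1]++;  // '/' + 1 == '0', so "/vol1/buck1/" -> "/vol1/buck10"
    return String.valueOf(chars);
  }

  public static void main(String[] args) {
    String prefix = "/vol1/buck1/";
    String endKey = lexicographicallyHigher(prefix);
    // Every key in [prefix, endKey) starts with prefix, which is what the
    // bucket-scoped range delete and SST upper bound rely on.
    System.out.println(prefix + " -> " + endKey);
  }
}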
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 0ea625a0e064..65d73d9d92ea 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -61,7 +61,6 @@ import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyDouble; import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyMap; import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.anyString; @@ -117,6 +116,7 @@ import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; @@ -436,6 +436,7 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { when(differ.getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + anySet(), eq(diffDir)) ).thenReturn(Optional.of(Lists.newArrayList(randomStrings))); @@ -454,16 +455,17 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { Mockito.CALLS_REAL_METHODS)) { mockedRdbUtil.when(() -> RdbUtil.getSSTFilesForComparison(any(), any())) .thenReturn(Collections.singleton(RandomStringUtils.secure().nextAlphabetic(10))); - mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles(any(), any())).thenAnswer(i -> null); + mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles(any(), any(), anySet())) + .thenAnswer(i -> null); SnapshotDiffManager spy = spy(snapshotDiffManager); doNothing().when(spy).recordActivity(any(), any()); doNothing().when(spy).updateProgress(anyString(), anyDouble()); Set deltaFiles = spy.getDeltaFiles( fromSnapshot, toSnapshot, - Arrays.asList("cf1", "cf2"), fromSnapshotInfo, + Sets.newHashSet("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, - Collections.emptyMap(), diffDir, diffJobKey); + new TablePrefixInfo(Collections.emptyMap()), diffDir, diffJobKey); assertEquals(randomStrings, deltaFiles); } rcFromSnapshot.close(); @@ -483,7 +485,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, Set deltaStrings = new HashSet<>(); mockedRdbUtil.when( - () -> RdbUtil.getSSTFilesForComparison(any(), anyList())) + () -> RdbUtil.getSSTFilesForComparison(any(), anySet())) .thenAnswer((Answer>) invocation -> { Set retVal = IntStream.range(0, numberOfFiles) .mapToObj(i -> RandomStringUtils.secure().nextAlphabetic(10)) @@ -493,7 +495,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap(), anyMap(), any(ManagedRocksDB.class), + RocksDiffUtils.filterRelevantSstFiles(anySet(), any(), anyMap(), anySet(), any(ManagedRocksDB.class), any(ManagedRocksDB.class))) .thenAnswer((Answer) invocationOnMock -> { invocationOnMock.getArgument(0, Set.class).stream() @@ -515,6 +517,7 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, when(differ.getSSTDiffListWithFullPath( 
any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + anySet(), anyString())) .thenReturn(Optional.ofNullable(Collections.emptyList())); } @@ -535,11 +538,11 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, Set deltaFiles = spy.getDeltaFiles( fromSnapshot, toSnapshot, - Arrays.asList("cf1", "cf2"), + Sets.newHashSet("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, - Collections.emptyMap(), + new TablePrefixInfo(Collections.emptyMap()), snapDiffDir.getAbsolutePath(), diffJobKey); assertEquals(deltaStrings, deltaFiles); } @@ -555,7 +558,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) Set deltaStrings = new HashSet<>(); mockedRdbUtil.when( - () -> RdbUtil.getSSTFilesForComparison(any(), anyList())) + () -> RdbUtil.getSSTFilesForComparison(any(), anySet())) .thenAnswer((Answer>) invocation -> { Set retVal = IntStream.range(0, numberOfFiles) .mapToObj(i -> RandomStringUtils.secure().nextAlphabetic(10)) @@ -565,7 +568,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anyMap(), anyMap(), any(ManagedRocksDB.class), + RocksDiffUtils.filterRelevantSstFiles(anySet(), any(), anyMap(), anySet(), any(ManagedRocksDB.class), any(ManagedRocksDB.class))) .thenAnswer((Answer) invocationOnMock -> { invocationOnMock.getArgument(0, Set.class).stream() @@ -588,6 +591,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) .getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + anySet(), anyString()); UncheckedAutoCloseableSupplier rcFromSnapshot = @@ -607,11 +611,11 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) Set deltaFiles = spy.getDeltaFiles( fromSnapshot, toSnapshot, - Arrays.asList("cf1", "cf2"), + Sets.newHashSet("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, - Collections.emptyMap(), + new TablePrefixInfo(Collections.emptyMap()), snapDiffDir.getAbsolutePath(), diffJobKey); assertEquals(deltaStrings, deltaFiles); @@ -704,7 +708,7 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, String keyName = split[split.length - 1]; return Integer.parseInt(keyName.substring(3)) % 2 == 0; } - ).when(spy).isKeyInBucket(anyString(), anyMap(), anyString()); + ).when(spy).isKeyInBucket(anyString(), any(), anyString()); assertFalse(isKeyInBucket); PersistentMap oldObjectIdKeyMap = @@ -722,7 +726,7 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, nativeLibraryLoaded, oldObjectIdKeyMap, newObjectIdKeyMap, objectIdsToCheck, Optional.of(oldParentIds), Optional.of(newParentIds), - ImmutableMap.of(DIRECTORY_TABLE, "", KEY_TABLE, "", FILE_TABLE, ""), ""); + new TablePrefixInfo(ImmutableMap.of(DIRECTORY_TABLE, "", KEY_TABLE, "", FILE_TABLE, "")), ""); try (ClosableIterator> oldObjectIdIter = oldObjectIdKeyMap.iterator()) { @@ -856,8 +860,7 @@ public void testGenerateDiffReport() throws IOException { return keyInfo; }); when(fromSnapTable.getName()).thenReturn("table"); - Map tablePrefixes = mock(Map.class); - when(tablePrefixes.get(anyString())).thenReturn(""); + TablePrefixInfo tablePrefixes = new TablePrefixInfo(Collections.emptyMap()); SnapshotDiffManager spy = spy(snapshotDiffManager); doReturn(true).when(spy) .areDiffJobAndSnapshotsActive(volumeName, bucketName, fromSnapName, @@ -1250,7 +1253,7 @@ public void testGenerateDiffReportWhenThereInEntry() { false, Optional.empty(), Optional.empty(), - 
Collections.emptyMap()); + new TablePrefixInfo(Collections.emptyMap())); assertEquals(0, totalDiffEntries); } @@ -1292,7 +1295,7 @@ public void testGenerateDiffReportFailure() throws IOException { false, Optional.empty(), Optional.empty(), - Collections.emptyMap()) + new TablePrefixInfo(Collections.emptyMap())) ); assertEquals("Old and new key name both are null", exception.getMessage()); @@ -1561,12 +1564,12 @@ public void testGetDeltaFilesWithFullDiff() throws IOException { } return null; }).when(spy).getSSTFileMapForSnapshot(Mockito.any(OmSnapshot.class), - Mockito.anyList()); + Mockito.anySet()); doNothing().when(spy).recordActivity(any(), any()); doNothing().when(spy).updateProgress(anyString(), anyDouble()); String diffJobKey = snap1 + DELIMITER + snap2; - Set deltaFiles = spy.getDeltaFiles(fromSnapshot, toSnapshot, Collections.emptyList(), snapshotInfo, - snapshotInfo, true, Collections.emptyMap(), null, diffJobKey); + Set deltaFiles = spy.getDeltaFiles(fromSnapshot, toSnapshot, Collections.emptySet(), snapshotInfo, + snapshotInfo, true, new TablePrefixInfo(Collections.emptyMap()), null, diffJobKey); Assertions.assertEquals(Sets.newHashSet("3.sst", "4.sst"), deltaFiles); } @@ -1580,21 +1583,21 @@ public void testGetSnapshotDiffReportHappyCase() throws Exception { SnapshotDiffManager spy = spy(snapshotDiffManager); doReturn(testDeltaFiles).when(spy).getDeltaFiles(any(OmSnapshot.class), - any(OmSnapshot.class), anyList(), eq(fromSnapInfo), eq(toSnapInfo), - eq(false), anyMap(), anyString(), + any(OmSnapshot.class), anySet(), eq(fromSnapInfo), eq(toSnapInfo), + eq(false), any(), anyString(), anyString()); doReturn(testDeltaFiles).when(spy) - .getSSTFileListForSnapshot(any(OmSnapshot.class), anyList()); + .getSSTFileListForSnapshot(any(OmSnapshot.class), anySet()); doNothing().when(spy).addToObjectIdMap(eq(keyInfoTable), eq(keyInfoTable), - any(), anyBoolean(), any(), any(), any(), any(), any(), anyMap(), anyString()); + any(), anyBoolean(), any(), any(), any(), any(), any(), any(), anyString()); doNothing().when(spy).checkReportsIntegrity(any(), anyInt(), anyInt()); doReturn(10L).when(spy).generateDiffReport(anyString(), any(), any(), any(), any(), any(), any(), any(), anyString(), anyString(), anyString(), anyString(), anyBoolean(), - any(), any(), anyMap()); + any(), any(), any()); doReturn(LEGACY).when(spy).getBucketLayout(VOLUME_NAME, BUCKET_NAME, omMetadataManager); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java index ea2a1ef1e9ff..4bb48236a82f 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestLdbRepair.java @@ -148,7 +148,7 @@ public void testRocksDBManualCompaction() throws Exception { List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath.toString()); try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath.toString(), cfDescList, cfHandleList)) { List liveFileMetaDataList = RdbUtil - .getLiveSSTFilesForCFs(db, Collections.singletonList(TEST_CF_NAME)); + .getLiveSSTFilesForCFs(db, Collections.singleton(TEST_CF_NAME)); for (LiveFileMetaData liveMetadata : liveFileMetaDataList) { assertEquals(0, liveMetadata.numDeletions(), "Tombstones found in file: " + liveMetadata.fileName()); From 41b7cfbcba3250090b6a3c3e0c54ea169f4705fa Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 
17:22:32 -0400 Subject: [PATCH 094/126] HDDS-13830. Fix pmd Change-Id: If9cd8e82083f90997d7ce052408b33648d9b2c2a --- .../org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 64946e52e886..e7695debd619 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -45,7 +45,6 @@ public class TestOmSnapshotInfo { private static final UUID GLOBAL_PREVIOUS_SNAPSHOT_ID = PATH_PREVIOUS_SNAPSHOT_ID; private static final String SNAPSHOT_PATH = "test/path"; - private static final String CHECKPOINT_DIR = "checkpoint.testdir"; private static final long DB_TX_SEQUENCE_NUMBER = 12345L; private SnapshotInfo createSnapshotInfo() { From d0422aed82ef2a2b3189dbc3f7a7384b3f16ba95 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 2 Nov 2025 14:16:56 -0500 Subject: [PATCH 095/126] HDDS-13830. Fix merge issue Change-Id: Ia02449ab5237ddb6021fa3a62ed66290c1dff83d --- .../om/TestOMDbCheckpointServletInodeBasedXfer.java | 4 ++-- .../om/OMDBCheckpointServletInodeBasedXfer.java | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java index f2b94182c809..a6ae3eaab21f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java @@ -240,7 +240,7 @@ public void write(int b) throws IOException { doCallRealMethod().when(omDbCheckpointServletMock) .transferSnapshotData(anySet(), any(), anySet(), any(), any(), anyMap()); doCallRealMethod().when(omDbCheckpointServletMock).createAndPrepareCheckpoint(anyBoolean()); - doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any()); + doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any(), any(), any()); } @ParameterizedTest @@ -748,7 +748,7 @@ private void setupClusterAndMocks(String volumeName, String bucketName, // Init the mock with the spyDbstore doCallRealMethod().when(omDbCheckpointServletMock).initialize(any(), any(), eq(false), any(), any(), eq(false)); - doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any()); + doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any(), any(), any()); omDbCheckpointServletMock.initialize(spyDbStore, om.getMetrics().getDBCheckpointMetrics(), false, om.getOmAdminUsernames(), om.getOmAdminGroups(), false); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 9d55e2203cf4..748329be83ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -71,6 +71,7
@@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -268,7 +269,8 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina // get the list of snapshots from the checkpoint try (OmMetadataManagerImpl checkpointMetadataManager = OmMetadataManagerImpl .createCheckpointMetadataManager(om.getConfiguration(), checkpoint)) { - snapshotPaths = getSnapshotDirsFromDB(checkpointMetadataManager); + snapshotPaths = getSnapshotDirsFromDB(omMetadataManager, checkpointMetadataManager, + snapshotLocalDataManager); } writeDBToArchive(sstFilesToExclude, getCompactionLogDir(), maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap, false); @@ -402,11 +404,9 @@ Set getSnapshotDirsFromDB(OMMetadataManager activeOMMetadataManager, OMMet Table.KeyValue kv = iter.next(); SnapshotInfo snapshotInfo = kv.getValue(); try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapLocalMeta = - localDataManager.getOmSnapshotLocalDataMeta(snapInfo.getSnapshotId())) { - OmSnapshotManager.getSnapshotPath(getConf(), - snapshotInfo.getCheckpointDirName()); - Path snapshotDir = getSnapshotPath(activeOMMetadataManager, - snapInfo.getSnapshotId(), snapLocalMeta.getMeta().getVersion()); + localDataManager.getOmSnapshotLocalDataMeta(snapshotInfo.getSnapshotId())) { + Path snapshotDir = getSnapshotPath(activeOMMetadataManager, snapshotInfo.getSnapshotId(), + snapLocalMeta.getMeta().getVersion()); snapshotPaths.add(snapshotDir); } } From bb5139f1f3d6b12b5b9ff2f5da875278fbd0309c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 2 Nov 2025 23:04:52 -0500 Subject: [PATCH 096/126] HDDS-13452. 
Prevent snapshot defrag from happening before upgrade finalization Change-Id: I26ac4e914906fd1e764877577afb9a51a489b07f --- .../hdds/utils/MapBackedTableIterator.java | 6 +- .../hdds/utils/db/InMemoryTestTable.java | 13 +- .../utils/db/StringInMemoryTestTable.java | 30 ++++ .../hadoop/ozone/om/OmSnapshotManager.java | 41 ++--- .../ozone/om/SnapshotDefragService.java | 4 +- .../snapshot/OmSnapshotLocalDataManager.java | 54 ++++++- .../ozone/om/upgrade/OMLayoutFeature.java | 3 +- .../TestOmSnapshotLocalDataManager.java | 140 ++++++++++++++---- 8 files changed, 223 insertions(+), 68 deletions(-) create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/StringInMemoryTestTable.java diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java index 5af0e671d51b..5ce574509da7 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/MapBackedTableIterator.java @@ -19,7 +19,7 @@ import java.util.Iterator; import java.util.Map; -import java.util.TreeMap; +import java.util.NavigableMap; import org.apache.hadoop.hdds.utils.db.Table; /** @@ -29,9 +29,9 @@ public class MapBackedTableIterator implements Table.KeyValueIterator> itr; private final String prefix; - private final TreeMap values; + private final NavigableMap values; - public MapBackedTableIterator(TreeMap values, String prefix) { + public MapBackedTableIterator(NavigableMap values, String prefix) { this.prefix = prefix; this.values = values; this.seekToFirst(); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java index 51baeb45177f..34d23a039e8e 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java @@ -21,21 +21,22 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.NavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; /** * InMemory Table implementation for tests. 
*/ -public final class InMemoryTestTable implements Table { - private final Map map; +public class InMemoryTestTable implements Table { + private final NavigableMap map; public InMemoryTestTable() { this(Collections.emptyMap()); } public InMemoryTestTable(Map map) { - this.map = new ConcurrentHashMap<>(); + this.map = new ConcurrentSkipListMap<>(map); this.map.putAll(map); } @@ -124,4 +125,8 @@ public void dumpToFileWithPrefix(File externalFile, KEY prefix) { public void loadFromFile(File externalFile) { throw new UnsupportedOperationException(); } + + NavigableMap getMap() { + return map; + } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/StringInMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/StringInMemoryTestTable.java new file mode 100644 index 000000000000..c9f7e7abceda --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/StringInMemoryTestTable.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdds.utils.db; + +import org.apache.hadoop.hdds.utils.MapBackedTableIterator; + +/** + * String-keyed {@link InMemoryTestTable} whose iterator honors key prefixes, for tests. + */ +public class StringInMemoryTestTable extends InMemoryTestTable { + @Override + public KeyValueIterator iterator(String prefix, KeyValueIterator.Type type) { + return new MapBackedTableIterator<>(getMap(), prefix); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 218843db971d..32c6dd4b3491 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -46,9 +46,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_REPORT_MAX_PAGE_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; -import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TIMEOUT; @@ -62,7 +59,6 @@ import com.google.common.cache.CacheLoader; import com.google.common.cache.RemovalListener; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; import jakarta.annotation.Nonnull; import java.io.File; import java.io.IOException; @@ -111,10 +107,10 @@ import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.apache.ratis.util.function.CheckedFunction; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.LiveFileMetaData; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -177,7 +173,7 @@ public final class OmSnapshotManager implements AutoCloseable { * families before compaction. */ public static final Set COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT = - ImmutableSet.of(KEY_TABLE, DIRECTORY_TABLE, FILE_TABLE); + RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG; private final long diffCleanupServiceInterval; private final int maxOpenSstFilesInSnapshotDb; @@ -198,12 +194,9 @@ public final class OmSnapshotManager implements AutoCloseable { public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); - this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), - omMetadataManager.getSnapshotChainManager(), ozoneManager.getConfiguration()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", isFilesystemSnapshotEnabled ? "enabled" : "disabled"); - // Confirm that snapshot feature can be safely disabled.
// Throw unchecked exception if that is not the case. if (!isFilesystemSnapshotEnabled && @@ -216,7 +209,6 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { "Please set config ozone.filesystem.snapshot.enabled to true and " + "try to start this Ozone Manager again."); } - this.options = new ManagedDBOptions(); this.options.setCreateIfMissing(true); this.columnFamilyOptions = new ManagedColumnFamilyOptions(); @@ -231,6 +223,12 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT ); + CheckedFunction defaultSnapDBProvider = snapshotInfo -> + getSnapshotOmMetadataManager(snapshotInfo, 0, maxOpenSstFilesInSnapshotDb, + ozoneManager.getConfiguration()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), + omMetadataManager.getSnapshotChainManager(), ozoneManager.getVersionManager(), defaultSnapDBProvider, + ozoneManager.getConfiguration()); Preconditions.checkArgument(this.maxOpenSstFilesInSnapshotDb >= -1, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES + " value should be larger than or equal to -1."); @@ -238,7 +236,6 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { ColumnFamilyHandle snapDiffReportCf; ColumnFamilyHandle snapDiffPurgedJobCf; String dbPath = getDbPath(ozoneManager.getConfiguration()); - try { // Add default CF columnFamilyDescriptors.add(new ColumnFamilyDescriptor( @@ -377,6 +374,12 @@ public boolean canDisableFsSnapshot(OMMetadataManager ommm) { return isSnapshotInfoTableEmpty; } + private static OmMetadataManagerImpl getSnapshotOmMetadataManager(SnapshotInfo snapshotInfo, int version, + int maxOpenSstFilesInSnapshotDb, OzoneConfiguration conf) throws IOException { + return new OmMetadataManagerImpl(conf, snapshotInfo.getCheckpointDirName(version), + maxOpenSstFilesInSnapshotDb); + } + private CacheLoader createCacheLoader() { return new CacheLoader() { @@ -429,9 +432,8 @@ public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { } try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapshotLocalDataProvider = snapshotLocalDataManager.getOmSnapshotLocalDataMeta(snapshotInfo)) { - snapshotMetadataManager = new OmMetadataManagerImpl(conf, - snapshotInfo.getCheckpointDirName(snapshotLocalDataProvider.getMeta().getVersion()), - maxOpenSstFilesInSnapshotDb); + snapshotMetadataManager = getSnapshotOmMetadataManager(snapshotInfo, + snapshotLocalDataProvider.getMeta().getVersion(), maxOpenSstFilesInSnapshotDb, conf); } } catch (IOException e) { LOG.error("Failed to retrieve snapshot: {}", snapshotTableKey, e); @@ -588,17 +590,6 @@ public SnapshotDiffCleanupService getSnapshotDiffCleanupService() { return snapshotDiffCleanupService; } - /** - * Captures the list of SST files for keyTable, fileTable and directoryTable in the DB. - * @param store AOS or snapshot DB for not defragged or defragged snapshot respectively. 
- * @return a Map of (table, set of SST files corresponding to the table) - */ - public static List getSnapshotSSTFileList(RDBStore store) throws IOException { - return store.getDb().getLiveFilesMetaData().stream() - .filter(lfm -> COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT.contains(StringUtils.bytes2String(lfm.columnFamilyName()))) - .collect(Collectors.toList()); - } - // Get OmSnapshot if the keyName has ".snapshot" key indicator @SuppressWarnings("unchecked") public UncheckedAutoCloseableSupplier getActiveFsMetadataOrSnapshot( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 61b6cba68010..ebf05a9e96c7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -345,7 +346,8 @@ private boolean shouldRun() { return false; } // The service only runs if current OM node is ready - return running.get() && ozoneManager.isRunning(); + return running.get() && ozoneManager.isRunning() && + ozoneManager.getVersionManager().isAllowed(OMLayoutFeature.SNAPSHOT_DEFRAG); } public AtomicLong getSnapshotsDefraggedCount() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 21b79b0eb997..a3212642bcce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -20,6 +20,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.SNAPSHOT_DEFRAG; +import static org.apache.ozone.rocksdb.util.RdbUtil.getLiveSSTFilesForCFs; import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; @@ -52,7 +56,10 @@ import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import 
org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; @@ -63,8 +70,10 @@ import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; +import org.apache.ratis.util.function.CheckedFunction; import org.apache.ratis.util.function.CheckedSupplier; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.LiveFileMetaData; @@ -113,7 +122,8 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private volatile boolean closed; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, - SnapshotChainManager snapshotChainManager, + SnapshotChainManager snapshotChainManager, OMLayoutVersionManager omLayoutVersionManager, + CheckedFunction defaultSnapProvider, OzoneConfiguration configuration) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; @@ -128,7 +138,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO this.versionNodeMap = new ConcurrentHashMap<>(); this.fullLock = new ReentrantReadWriteLock(); this.internalLock = new ReentrantReadWriteLock(); - init(configuration, snapshotChainManager); + init(configuration, snapshotChainManager, omLayoutVersionManager, defaultSnapProvider); } @VisibleForTesting @@ -172,7 +182,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), + getLiveSSTFilesForCFs(snapshotStore.getDb().getManagedRocksDb(), + COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT), snapshotInfo.getPathPreviousSnapshotId(), null), null))) { snapshotLocalData.commit(); @@ -242,6 +253,32 @@ private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh } } + private void addMissingSnapshotYamlFiles( + CheckedFunction defaultSnapProvider) throws IOException { + try (Table.KeyValueIterator itr = omMetadataManager.getSnapshotInfoTable().iterator()) { + while (itr.hasNext()) { + SnapshotInfo snapshotInfo = itr.next().getValue(); + UUID snapshotId = snapshotInfo.getSnapshotId(); + File snapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(snapshotId)); + // Create a yaml file for snapshots which are missing + if (!snapshotLocalDataFile.exists()) { + List sstList = Collections.emptyList(); + if (snapshotInfo.getSnapshotStatus() == SNAPSHOT_ACTIVE) { + try (OmMetadataManagerImpl snapshotMetadataManager = defaultSnapProvider.apply(snapshotInfo)) { + ManagedRocksDB snapDB = ((RDBStore)snapshotMetadataManager.getStore()).getDb().getManagedRocksDb(); + sstList = getLiveSSTFilesForCFs(snapDB, COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT); + } catch (Exception e) { + throw new IOException(e); + } + } + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapshotId, sstList, + snapshotInfo.getPathPreviousSnapshotId(), null); + snapshotLocalDataSerializer.save(snapshotLocalDataFile, snapshotLocalData); + } + } + } + } + void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { if 
(versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { return; @@ -297,12 +334,18 @@ Map getSnapshotToBeCheckedForOrphans() { return snapshotToBeCheckedForOrphans; } - private void init(OzoneConfiguration configuration, SnapshotChainManager chainManager) throws IOException { + private void init(OzoneConfiguration configuration, SnapshotChainManager chainManager, + OMLayoutVersionManager layoutVersionManager, + CheckedFunction defaultSnapProvider) throws IOException { this.locks = omMetadataManager.getHierarchicalLockManager(); this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>(); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); + boolean upgradeNeeded = !layoutVersionManager.isAllowed(SNAPSHOT_DEFRAG); + if (upgradeNeeded) { + addMissingSnapshotYamlFiles(defaultSnapProvider); + } File[] localDataFiles = snapshotDir.listFiles( (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)); if (localDataFiles == null) { @@ -813,7 +856,8 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo } public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { - List sstFiles = OmSnapshotManager.getSnapshotSSTFileList(snapshotStore); + List sstFiles = getLiveSSTFilesForCFs(snapshotStore.getDb().getManagedRocksDb(), + COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT); OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java index 7deeef51161c..057484c673e1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutFeature.java @@ -45,7 +45,8 @@ public enum OMLayoutFeature implements LayoutFeature { QUOTA(6, "Ozone quota re-calculate"), HBASE_SUPPORT(7, "Full support of hsync, lease recovery and listOpenFiles APIs for HBase"), - DELEGATION_TOKEN_SYMMETRIC_SIGN(8, "Delegation token signed by symmetric key"); + DELEGATION_TOKEN_SYMMETRIC_SIGN(8, "Delegation token signed by symmetric key"), + SNAPSHOT_DEFRAG(9, "Supporting defragmentation of snapshot"); /////////////////////////////// ///////////////////////////// // Example OM Layout Feature with Actions diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 9aa56d2dd027..58ac40e074a4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -24,6 +24,8 @@ import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; 
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -32,6 +34,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; @@ -40,6 +43,7 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; import java.io.File; import java.io.IOException; import java.nio.file.Files; @@ -70,7 +74,11 @@ import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.hadoop.hdds.utils.db.StringInMemoryTestTable; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; @@ -80,8 +88,12 @@ import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.upgrade.LayoutFeature; import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ratis.util.function.CheckedFunction; import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; @@ -96,6 +108,7 @@ import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; +import org.rocksdb.RocksDB; import org.yaml.snakeyaml.Yaml; /** @@ -124,6 +137,9 @@ public class TestOmSnapshotLocalDataManager { @TempDir private Path tempDir; + @Mock + private OMLayoutVersionManager layoutVersionManager; + private OmSnapshotLocalDataManager localDataManager; private AutoCloseable mocks; @@ -177,6 +193,7 @@ public void setUp() throws IOException { purgedSnapshotIdMap.clear(); snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); + when(layoutVersionManager.isAllowed(any(LayoutFeature.class))).thenReturn(true); conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, -1); } @@ -242,6 +259,15 @@ private void mockLockManager() throws IOException { }); } + private OmSnapshotLocalDataManager getNewOmSnapshotLocalDataManager( + CheckedFunction provider) throws IOException { + return new OmSnapshotLocalDataManager(omMetadataManager, null, layoutVersionManager, provider, conf); + } + + private OmSnapshotLocalDataManager getNewOmSnapshotLocalDataManager() throws 
IOException { + return getNewOmSnapshotLocalDataManager(null); + } + private List createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLocalDataManager, int numberOfSnapshots) throws IOException { SnapshotInfo previousSnapshotInfo = null; @@ -277,11 +303,14 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) // Setup snapshot store mock File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0).toFile(); assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); - when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); - when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); + ManagedRocksDB db = mock(ManagedRocksDB.class); + when(rocksDatabase.getManagedRocksDb()).thenReturn(db); + RocksDB rdb = mock(RocksDB.class); + when(db.get()).thenReturn(rdb); + when(rdb.getLiveFilesMetaData()).thenReturn(sstFiles); } /** @@ -295,7 +324,7 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) @ParameterizedTest @ValueSource(booleans = {true, false}) public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = new ArrayList<>(); snapshotIds.add(null); snapshotIds.addAll(createSnapshotLocalData(localDataManager, 20)); @@ -347,7 +376,7 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionLockResolution(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 5); for (int snapIdx = 0; snapIdx < snapshotIds.size(); snapIdx++) { UUID snapId = snapshotIds.get(snapIdx); @@ -385,7 +414,7 @@ public void testVersionLockResolution(boolean read) throws IOException { @Test public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 2); UUID snapId = snapshotIds.get(1); try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = @@ -401,7 +430,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis @Test public void testUpdateTransactionInfo() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); UUID snapshotId = createSnapshotLocalData(localDataManager, 1).get(0); @@ -420,7 +449,7 @@ public void testUpdateTransactionInfo() throws IOException { @Test public void testAddVersionFromRDB() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 2); addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8)); UUID snapId 
= snapshotIds.get(1); @@ -457,7 +486,7 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage @ParameterizedTest @ValueSource(booleans = {true, false}) public void testOrphanVersionDeletionWithVersionDeletion(boolean purgeSnapshot) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 3); UUID firstSnapId = snapshotIds.get(0); UUID secondSnapId = snapshotIds.get(1); @@ -489,7 +518,7 @@ public void testOrphanVersionDeletionWithVersionDeletion(boolean purgeSnapshot) @ParameterizedTest @ValueSource(booleans = {true, false}) public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 3); UUID firstSnapId = snapshotIds.get(0); UUID secondSnapId = snapshotIds.get(1); @@ -526,7 +555,7 @@ public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) thro @ParameterizedTest @ValueSource(booleans = {true, false}) public void testWriteWithChainUpdate(boolean previousSnapshotExisting) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 3 + (previousSnapshotExisting ? 1 : 0)); int snapshotIdx = 1 + (previousSnapshotExisting ? 1 : 0); for (UUID snapshotId : snapshotIds) { @@ -578,7 +607,7 @@ public void testWriteWithChainUpdate(boolean previousSnapshotExisting) throws IO @ParameterizedTest @ValueSource(booleans = {true, false}) public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 3); UUID prevSnapId = snapshotIds.get(0); UUID snapId = snapshotIds.get(1); @@ -652,7 +681,7 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData @ParameterizedTest @ValueSource(ints = {1, 2, 3}) public void testNeedsDefrag(int previousVersion) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 2); for (UUID snapshotId : snapshotIds) { try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { @@ -672,7 +701,7 @@ public void testNeedsDefrag(int previousVersion) throws IOException { @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionResolution(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); List snapshotIds = createSnapshotLocalData(localDataManager, 5); List> versionMaps = Arrays.asList( ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15), @@ -715,7 +744,7 @@ public void testVersionResolution(boolean read) throws IOException { @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = 
getNewOmSnapshotLocalDataManager(); assertNotNull(localDataManager); } @@ -724,7 +753,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -762,7 +791,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { mockedLiveFiles.add(createMockLiveFileMetaData("ot2.sst", "otherTable", "k1", "k2")); mockSnapshotStore(snapshotId, mockedLiveFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); Path snapshotYaml = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); @@ -803,7 +832,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { bytes2String(lfm.largestKey()), bytes2String(lfm.columnFamilyName()))).collect(Collectors.toList()); mockSnapshotStore(snapshotId, sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); @@ -828,7 +857,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); @@ -849,7 +878,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -865,7 +894,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -883,7 +912,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); // Create snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -899,7 +928,7 @@ public void 
testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -921,7 +950,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); assertNotNull(localDataManager); Map versionMap = @@ -930,6 +959,56 @@ public void testInitWithExistingYamlFiles() throws IOException { assertEquals(versionMap.keySet(), new HashSet<>(versionIds)); } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testInitWithMissingYamlFiles(boolean needsUpgrade) throws IOException { + Table table = new StringInMemoryTestTable<>(); + when(omMetadataManager.getSnapshotInfoTable()).thenReturn(table); + UUID snap3 = UUID.randomUUID(); + UUID snap2 = UUID.randomUUID(); + UUID snap1 = UUID.randomUUID(); + CheckedFunction mockedProvider = (snapshotInfo) -> { + if (snapshotInfo.getSnapshotId().equals(snap2)) { + throw new IOException("SnapshotId should not be " + snap2 + " since it is deleted"); + } + mockSnapshotStore(snapshotInfo.getSnapshotId(), ImmutableList.of(createMockLiveFileMetaData( + snapshotInfo.getSnapshotId() + ".sst", KEY_TABLE, snapshotInfo.getSnapshotId() + "k1", + snapshotInfo.getSnapshotId() + "k2"))); + OmMetadataManagerImpl snapshotMetadataManager = mock(OmMetadataManagerImpl.class); + when(snapshotMetadataManager.getStore()).thenReturn(snapshotStore); + return snapshotMetadataManager; + }; + table.put("snap3", createMockSnapshotInfo(snap3, null, SNAPSHOT_ACTIVE)); + table.put("snap2", createMockSnapshotInfo(snap2, snap3, SNAPSHOT_DELETED)); + table.put("snap1", createMockSnapshotInfo(snap1, snap2, SNAPSHOT_ACTIVE)); + when(layoutVersionManager.isAllowed(eq(OMLayoutFeature.SNAPSHOT_DEFRAG))).thenReturn(!needsUpgrade); + localDataManager = getNewOmSnapshotLocalDataManager(mockedProvider); + if (needsUpgrade) { + assertEquals(ImmutableSet.of(snap1, snap2, snap3), localDataManager.getVersionNodeMap().keySet()); + Map previousMap = ImmutableMap.of(snap2, snap3, snap1, snap2); + Map> expectedSstFile = ImmutableMap.of( + snap3, ImmutableMap.of(0, + new OmSnapshotLocalData.VersionMeta(0, ImmutableList.of( + new SstFileInfo(snap3.toString(), snap3 + "k1", snap3 + "k2", KEY_TABLE)))), + snap1, ImmutableMap.of(0, + new OmSnapshotLocalData.VersionMeta(0, ImmutableList.of( + new SstFileInfo(snap1.toString(), snap1 + "k1", snap1 + "k2", KEY_TABLE)))), + snap2, ImmutableMap.of(0, + new OmSnapshotLocalData.VersionMeta(0, ImmutableList.of()))); + for (UUID snapshotId : localDataManager.getVersionNodeMap().keySet()) { + try (ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = + localDataManager.getOmSnapshotLocalData(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); + assertEquals(snapshotId, snapshotLocalData.getSnapshotId()); + assertEquals(previousMap.get(snapshotId), snapshotLocalData.getPreviousSnapshotId()); + assertEquals(expectedSstFile.get(snapshotId), snapshotLocalData.getVersionSstFileInfos()); + } + } + } else { + assertEquals(ImmutableSet.of(), 
localDataManager.getVersionNodeMap().keySet()); + } + } + @Test public void testInitWithInvalidPathThrowsException() throws IOException { UUID snapshotId = UUID.randomUUID(); @@ -940,14 +1019,12 @@ public void testInitWithInvalidPathThrowsException() throws IOException { writeLocalDataToFile(localData, wrongPath); // Should throw IOException during init - assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager, null, conf); - }); + assertThrows(IOException.class, this::getNewOmSnapshotLocalDataManager); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + localDataManager = getNewOmSnapshotLocalDataManager(); // Should not throw exception localDataManager.close(); } @@ -955,14 +1032,19 @@ public void testClose() throws IOException { // Helper methods private SnapshotInfo createMockSnapshotInfo(UUID snapshotId, UUID previousSnapshotId) { + return createMockSnapshotInfo(snapshotId, previousSnapshotId, null); + } + + private SnapshotInfo createMockSnapshotInfo(UUID snapshotId, UUID previousSnapshotId, + SnapshotInfo.SnapshotStatus snapshotStatus) { SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() .setSnapshotId(snapshotId) .setName("snapshot-" + snapshotId); - + builder.setSnapshotStatus(snapshotStatus == null ? SNAPSHOT_ACTIVE : snapshotStatus); if (previousSnapshotId != null) { builder.setPathPreviousSnapshotId(previousSnapshotId); } - + return builder.build(); } From 298ae3651533e69ae3680a5b0c67a92f5d6eab0e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 3 Nov 2025 01:04:32 -0500 Subject: [PATCH 097/126] HDDS-13849. Fix test Change-Id: I3ed6575a96a12c349bb229fb3772c256a986f242 --- .../hadoop/ozone/om/TestKeyManagerImpl.java | 22 +++++++++++++------ .../om/service/TestKeyDeletingService.java | 9 +++++++- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index d021cc752507..52cd9fb15cac 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -17,9 +17,14 @@ package org.apache.hadoop.ozone.om; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_DIR_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_RENAMED_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; @@ -87,7 +92,7 @@ private List> mockTableIterator( for (int k = 0; k < numberOfKeysPerBucket; k++) { String key = String.format("/%s%010d/%s%010d/%s%010d", volumeNamePrefix, i, bucketNamePrefix, j, keyPrefix, k); - V value = valueClass == String.class ? (V) key : Mockito.mock(valueClass); + V value = valueClass == String.class ? 
(V) key : mock(valueClass); values.put(key, value); if ((volumeNumberFilter == null || i == volumeNumberFilter) && @@ -122,11 +127,12 @@ public void testGetDeletedKeyEntries(int numberOfVolumes, int numberOfBucketsPer String keyPrefix = "key"; OzoneConfiguration configuration = new OzoneConfiguration(); OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class); - when(metadataManager.getBucketKeyPrefix(anyString(), anyString())).thenAnswer(i -> - "/" + i.getArguments()[0] + "/" + i.getArguments()[1] + "/"); KeyManagerImpl km = new KeyManagerImpl(null, null, metadataManager, configuration, null, null, null); Table mockedDeletedTable = Mockito.mock(Table.class); + when(mockedDeletedTable.getName()).thenReturn(DELETED_TABLE); when(metadataManager.getDeletedTable()).thenReturn(mockedDeletedTable); + when(metadataManager.getTableBucketPrefix(eq(DELETED_TABLE), anyString(), anyString())) + .thenAnswer(i -> "/" + i.getArguments()[1] + "/" + i.getArguments()[2] + "/"); CheckedFunction, Boolean, IOException> filter = (kv) -> Long.parseLong(kv.getKey().split(keyPrefix)[1]) % 2 == 0; List>> expectedEntries = mockTableIterator( @@ -166,11 +172,12 @@ public void testGetRenameKeyEntries(int numberOfVolumes, int numberOfBucketsPerV String keyPrefix = ""; OzoneConfiguration configuration = new OzoneConfiguration(); OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class); - when(metadataManager.getBucketKeyPrefix(anyString(), anyString())).thenAnswer(i -> - "/" + i.getArguments()[0] + "/" + i.getArguments()[1] + "/"); KeyManagerImpl km = new KeyManagerImpl(null, null, metadataManager, configuration, null, null, null); Table mockedRenameTable = Mockito.mock(Table.class); + when(mockedRenameTable.getName()).thenReturn(SNAPSHOT_RENAMED_TABLE); when(metadataManager.getSnapshotRenamedTable()).thenReturn(mockedRenameTable); + when(metadataManager.getTableBucketPrefix(eq(SNAPSHOT_RENAMED_TABLE), anyString(), anyString())) + .thenAnswer(i -> "/" + i.getArguments()[1] + "/" + i.getArguments()[2] + "/"); CheckedFunction, Boolean, IOException> filter = (kv) -> Long.parseLong(kv.getKey().split("/")[3]) % 2 == 0; List> expectedEntries = mockTableIterator( @@ -204,11 +211,12 @@ public void testGetDeletedDirEntries(int numberOfVolumes, int numberOfBucketsPer startVolumeNumber = null; OzoneConfiguration configuration = new OzoneConfiguration(); OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class); - when(metadataManager.getBucketKeyPrefixFSO(anyString(), anyString())).thenAnswer(i -> - "/" + i.getArguments()[0] + "/" + i.getArguments()[1] + "/"); KeyManagerImpl km = new KeyManagerImpl(null, null, metadataManager, configuration, null, null, null); Table mockedDeletedDirTable = Mockito.mock(Table.class); + when(mockedDeletedDirTable.getName()).thenReturn(DELETED_DIR_TABLE); when(metadataManager.getDeletedDirTable()).thenReturn(mockedDeletedDirTable); + when(metadataManager.getTableBucketPrefix(eq(DELETED_DIR_TABLE), anyString(), anyString())) + .thenAnswer(i -> "/" + i.getArguments()[1] + "/" + i.getArguments()[2] + "/"); List> expectedEntries = mockTableIterator( OmKeyInfo.class, mockedDeletedDirTable, numberOfVolumes, numberOfBucketsPerVolume, numberOfKeysPerBucket, volumeNamePrefix, bucketNamePrefix, keyPrefix, volumeNumber, bucketNumber, startVolumeNumber, startBucketNumber, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 8c51527b10d4..86c131e97d04 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -89,6 +89,7 @@ import org.apache.hadoop.ozone.om.PendingKeysDeletion.PurgedKey; import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.SstFilteringService; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -150,6 +151,7 @@ class TestKeyDeletingService extends OzoneTestBase { private OMMetadataManager metadataManager; private KeyDeletingService keyDeletingService; private DirectoryDeletingService directoryDeletingService; + private SstFilteringService sstFilteringService; private ScmBlockLocationTestingClient scmBlockTestingClient; private DeletingServiceMetrics metrics; @@ -183,7 +185,7 @@ private void createConfig(File testDir, int delintervalMs) { private void createSubject() throws Exception { OmTestManagers omTestManagers = new OmTestManagers(conf, scmBlockTestingClient, null); keyManager = omTestManagers.getKeyManager(); - + sstFilteringService = keyManager.getSnapshotSstFilteringService(); keyDeletingService = keyManager.getDeletingService(); directoryDeletingService = keyManager.getDirDeletingService(); writeClient = omTestManagers.getWriteClient(); @@ -557,6 +559,7 @@ void testSnapshotDeepClean() throws Exception { om.getMetadataManager().getKeyTable(BucketLayout.DEFAULT); // Suspend KeyDeletingService + sstFilteringService.pause(); keyDeletingService.suspend(); directoryDeletingService.suspend(); @@ -625,6 +628,7 @@ void testSnapshotDeepClean() throws Exception { assertTableRowCount(deletedTable, initialDeletedCount, metadataManager); checkSnapDeepCleanStatus(snapshotInfoTable, volumeName, true); } + sstFilteringService.resume(); } @Test @@ -802,7 +806,9 @@ void setup(@TempDir File testDir) throws Exception { @AfterEach void resume() { + directoryDeletingService.resume(); keyDeletingService.resume(); + sstFilteringService.resume(); } @AfterAll From 018571c42d8ea7fb5f454f510aeb0b1a014f8bb7 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 5 Nov 2025 13:50:30 -0500 Subject: [PATCH 098/126] HDDS-13785. 
Address review comments Change-Id: Ied3c728c1f566bdd9327b6b5340892bed993cbf6 --- .../snapshot/OmSnapshotLocalDataManager.java | 54 ++++++++++++------- 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 21b79b0eb997..96f76e4b790c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -862,14 +862,44 @@ public synchronized void commit() throws IOException { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); } } - upsertNode(super.snapshotId, localDataVersionNodes, getSnapshotLocalData().getTransactionInfo() != null); + SnapshotVersionsMeta previousVersionMeta = upsertNode(super.snapshotId, localDataVersionNodes); + checkForOrphanVersionsAndIncrementCount(super.snapshotId, previousVersionMeta, localDataVersionNodes, + getSnapshotLocalData().getTransactionInfo() != null); // Reset dirty bit resetDirty(); } } - private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, - boolean transactionInfoSet) throws IOException { + private void checkForOrphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVersionsMeta previousVersionsMeta, + SnapshotVersionsMeta currentVersionMeta, boolean transactionInfoSet) { + internalLock.readLock().lock(); + try { + if (previousVersionsMeta != null) { + Map currentVersionNodeMap = currentVersionMeta.getSnapshotVersions(); + Map previousVersionNodeMap = previousVersionsMeta.getSnapshotVersions(); + boolean versionsRemoved = previousVersionNodeMap.keySet().stream() + .anyMatch(version -> !currentVersionNodeMap.containsKey(version)); + + // The previous snapshotId could have become an orphan entry or could have orphan versions (in case of + // version removals). + if (versionsRemoved || !Objects.equals(previousVersionsMeta.getPreviousSnapshotId(), + currentVersionMeta.getPreviousSnapshotId())) { + incrementOrphanCheckCount(previousVersionsMeta.getPreviousSnapshotId()); + } + // If the transactionInfo is set, this means the snapshot has been purged and the entire YAML file could have + // become an orphan. Otherwise, if the version is updated, it + // could mean that there could be some orphan version present within the + // same snapshot. + if (transactionInfoSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { + incrementOrphanCheckCount(snapshotId); + } + } + } finally { + internalLock.readLock().unlock(); + } + } + + private SnapshotVersionsMeta upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { internalLock.writeLock().lock(); try { SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); @@ -877,14 +907,12 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, existingSnapVersions.getSnapshotVersions(); Map newVersions = snapshotVersions.getSnapshotVersions(); Map> predecessors = new HashMap<>(); - boolean versionsRemoved = false; // Track all predecessors of the existing versions and remove the node from the graph. 
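+ // Upsert semantics: the existing version nodes are removed here (with their predecessors captured), the new + // versions are re-inserted below with those predecessors re-attached, and the removed mapping is returned + // so that commit() can run orphan detection against it.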
for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the // nodes in the graph would change. predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); - versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey()); localDataGraph.removeNode(existingVersionNode); } @@ -896,21 +924,7 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, localDataGraph.putEdge(predecessor, entry.getValue()); } } - if (existingSnapVersions != null) { - // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of - // version removals) - if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(), - snapshotVersions.getPreviousSnapshotId())) { - incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); - } - // If the transactionInfo set, this means the snapshot has been purged and the entire YAML file could have - // become an orphan. Otherwise if the version is updated it - // could mean that there could be some orphan version present within the - // same snapshot. - if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { - incrementOrphanCheckCount(snapshotId); - } - } + return existingSnapVersions; } finally { internalLock.writeLock().unlock(); } From 6cd54dda954ba9088521bad5711c991297d9af70 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 5 Nov 2025 13:53:17 -0500 Subject: [PATCH 099/126] HDDS-13785. Address review comments Change-Id: I1105c70ad060e8a344d042eadc3fa65b1469b716 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 96f76e4b790c..20cefc4f44b1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -871,7 +871,7 @@ public synchronized void commit() throws IOException { } private void checkForOrphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVersionsMeta previousVersionsMeta, - SnapshotVersionsMeta currentVersionMeta, boolean transactionInfoSet) { + SnapshotVersionsMeta currentVersionMeta, boolean isPurgeTransactionSet) { internalLock.readLock().lock(); try { if (previousVersionsMeta != null) { @@ -890,7 +890,7 @@ private void checkForOrphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVer // become an orphan. Otherwise, if the version is updated, it // could mean that there could be some orphan version present within the // same snapshot. - if (transactionInfoSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { + if (isPurgeTransactionSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { incrementOrphanCheckCount(snapshotId); } From 261a6698e95bc794c8f8e1e2648e34bc69162140 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 5 Nov 2025 14:16:02 -0500 Subject: [PATCH 100/126] HDDS-13785. 
Remove unnecessary read lock Change-Id: Idc77f8a9a9a6cecb43b69d5532cf1bb2c679ce78 --- .../snapshot/OmSnapshotLocalDataManager.java | 41 ++++++++----------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 20cefc4f44b1..70955fa05783 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -872,30 +872,25 @@ public synchronized void commit() throws IOException { private void checkForOrphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVersionsMeta previousVersionsMeta, SnapshotVersionsMeta currentVersionMeta, boolean isPurgeTransactionSet) { - internalLock.readLock().lock(); - try { - if (previousVersionsMeta != null) { - Map currentVersionNodeMap = currentVersionMeta.getSnapshotVersions(); - Map previousVersionNodeMap = previousVersionsMeta.getSnapshotVersions(); - boolean versionsRemoved = previousVersionNodeMap.keySet().stream() - .anyMatch(version -> !currentVersionNodeMap.containsKey(version)); - - // The previous snapshotId could have become an orphan entry or could have orphan versions (in case of - // version removals). - if (versionsRemoved || !Objects.equals(previousVersionsMeta.getPreviousSnapshotId(), - currentVersionMeta.getPreviousSnapshotId())) { - incrementOrphanCheckCount(previousVersionsMeta.getPreviousSnapshotId()); - } - // If the transactionInfo is set, this means the snapshot has been purged and the entire YAML file could have - // become an orphan. Otherwise, if the version is updated, it - // could mean that there could be some orphan version present within the - // same snapshot. - if (isPurgeTransactionSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { - incrementOrphanCheckCount(snapshotId); - } + if (previousVersionsMeta != null) { + Map currentVersionNodeMap = currentVersionMeta.getSnapshotVersions(); + Map previousVersionNodeMap = previousVersionsMeta.getSnapshotVersions(); + boolean versionsRemoved = previousVersionNodeMap.keySet().stream() + .anyMatch(version -> !currentVersionNodeMap.containsKey(version)); + + // The previous snapshotId could have become an orphan entry or could have orphan versions (in case of + // version removals). + if (versionsRemoved || !Objects.equals(previousVersionsMeta.getPreviousSnapshotId(), + currentVersionMeta.getPreviousSnapshotId())) { + incrementOrphanCheckCount(previousVersionsMeta.getPreviousSnapshotId()); + } + // If the transactionInfo is set, this means the snapshot has been purged and the entire YAML file could have + // become an orphan. Otherwise, if the version is updated, it + // could mean that there could be some orphan version present within the + // same snapshot. + if (isPurgeTransactionSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { + incrementOrphanCheckCount(snapshotId); } - } finally { - internalLock.readLock().unlock(); - } } } From 151e887d51e4faafc6196eff683681192cd544e1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 8 Nov 2025 14:12:29 -0500 Subject: [PATCH 101/126] HDDS-13867. 
SnapshotDiff delta file computation should happen based on LocalDataYaml file Change-Id: Ib6e33f1a2e445faa8a68941c614e73f3438e5e95 --- .../apache/ozone/rocksdb/util/RdbUtil.java | 18 +- .../ozone/rocksdb/util/SstFileInfo.java | 6 + .../ozone/rocksdiff/DifferSnapshotInfo.java | 72 +++-- .../rocksdiff/RocksDBCheckpointDiffer.java | 264 ++++++++-------- .../ozone/rocksdiff/RocksDiffUtils.java | 50 ++- .../TestRocksDBCheckpointDiffer.java | 294 +++++++++--------- .../ozone/rocksdiff/TestRocksDiffUtils.java | 98 +----- .../hadoop/ozone/freon/TestOMSnapshotDAG.java | 110 +++---- .../hadoop/ozone/om/OmSnapshotManager.java | 2 +- .../om/snapshot/SnapshotDiffManager.java | 149 +++++---- .../om/snapshot/TestSnapshotDiffManager.java | 127 +++++--- 11 files changed, 591 insertions(+), 599 deletions(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java index 95c4a4aa2bb3..ac88102f800a 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java @@ -17,7 +17,6 @@ package org.apache.ozone.rocksdb.util; -import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -41,28 +40,25 @@ public final class RdbUtil { private RdbUtil() { } - public static List getLiveSSTFilesForCFs( - final ManagedRocksDB rocksDB, Set cfs) { + public static List getLiveSSTFilesForCFs(final ManagedRocksDB rocksDB, Set cfs) { return rocksDB.get().getLiveFilesMetaData().stream() .filter(lfm -> cfs.contains(StringUtils.bytes2String(lfm.columnFamilyName()))) .collect(Collectors.toList()); } - public static Set getSSTFilesForComparison( - final ManagedRocksDB rocksDB, Set cfs) { - return getLiveSSTFilesForCFs(rocksDB, cfs).stream() - .map(lfm -> new File(lfm.path(), lfm.fileName()).getPath()) + public static Set getSSTFilesForComparison(final ManagedRocksDB rocksDB, Set cfs) { + return getLiveSSTFilesForCFs(rocksDB, cfs).stream().map(SstFileInfo::new) .collect(Collectors.toCollection(HashSet::new)); } - public static Map getSSTFilesWithInodesForComparison(final ManagedRocksDB rocksDB, Set cfs) - throws IOException { + public static Map getSSTFilesWithInodesForComparison( + final ManagedRocksDB rocksDB, Set cfs) throws IOException { List liveSSTFilesForCFs = getLiveSSTFilesForCFs(rocksDB, cfs); - Map inodeToSstMap = new HashMap<>(); + Map inodeToSstMap = new HashMap<>(); for (LiveFileMetaData lfm : liveSSTFilesForCFs) { Path sstFilePath = Paths.get(lfm.path(), lfm.fileName()); Object inode = Files.readAttributes(sstFilePath, BasicFileAttributes.class).fileKey(); - inodeToSstMap.put(inode, sstFilePath.toString()); + inodeToSstMap.put(inode, new SstFileInfo(lfm)); } return inodeToSstMap; } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java index 50f8c4c54d06..bc4975f98680 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java @@ -18,7 +18,9 @@ package org.apache.ozone.rocksdb.util; import static org.apache.commons.io.FilenameUtils.getBaseName; +import static 
org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION; +import java.nio.file.Path; import java.util.Objects; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.db.CopyObject; @@ -89,6 +91,10 @@ public int hashCode() { return Objects.hash(fileName, startKey, endKey, columnFamily); } + public Path getFilePath(Path directoryPath) { + return directoryPath.resolve(fileName + SST_FILE_EXTENSION); + } + @Override public SstFileInfo copyObject() { return new SstFileInfo(fileName, startKey, endKey, columnFamily); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java index c72f56d5f116..840ed37a2463 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/DifferSnapshotInfo.java @@ -17,56 +17,68 @@ package org.apache.ozone.rocksdiff; +import static java.util.function.Function.identity; +import static java.util.stream.Collectors.toMap; + +import com.google.common.annotations.VisibleForTesting; +import java.nio.file.Path; +import java.util.List; +import java.util.NavigableMap; +import java.util.Set; import java.util.UUID; -import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import java.util.function.Function; +import java.util.stream.Collectors; +import org.apache.ozone.rocksdb.util.SstFileInfo; /** * Snapshot information node class for the differ. */ public class DifferSnapshotInfo { - private final String dbPath; - private final UUID snapshotId; - private final long snapshotGeneration; + private final UUID id; + private final long generation; + private final Function dbPathFunction; + private final NavigableMap> versionSstFiles; - private final TablePrefixInfo tablePrefixes; + public DifferSnapshotInfo(Function dbPathFunction, UUID id, long gen, + NavigableMap> sstFiles) { + this.dbPathFunction = dbPathFunction; + this.id = id; + generation = gen; + this.versionSstFiles = sstFiles; + } - private final ManagedRocksDB rocksDB; + public Path getDbPath(int version) { + return dbPathFunction.apply(version); + } - public DifferSnapshotInfo(String db, UUID id, long gen, - TablePrefixInfo tablePrefixInfo, - ManagedRocksDB rocksDB) { - dbPath = db; - snapshotId = id; - snapshotGeneration = gen; - tablePrefixes = tablePrefixInfo; - this.rocksDB = rocksDB; + public UUID getId() { + return id; } - public String getDbPath() { - return dbPath; + public long getGeneration() { + return generation; } - public UUID getSnapshotId() { - return snapshotId; + List getSstFiles(int version, Set tablesToLookup) { + return versionSstFiles.get(version).stream() + .filter(sstFileInfo -> tablesToLookup.contains(sstFileInfo.getColumnFamily())) + .collect(Collectors.toList()); } - public long getSnapshotGeneration() { - return snapshotGeneration; + @VisibleForTesting + SstFileInfo getSstFile(int version, String fileName) { + return versionSstFiles.get(version).stream() + .filter(sstFileInfo -> sstFileInfo.getFileName().equals(fileName)) + .findFirst().orElse(null); } - public TablePrefixInfo getTablePrefixes() { - return tablePrefixes; + Integer getMaxVersion() { + return versionSstFiles.lastKey(); } @Override public String toString() { - return String.format("DifferSnapshotInfo{dbPath='%s', snapshotID='%s', " + - 
"snapshotGeneration=%d, tablePrefixes size=%s}", - dbPath, snapshotId, snapshotGeneration, tablePrefixes.size()); - } - - public ManagedRocksDB getRocksDB() { - return rocksDB; + return String.format("DifferSnapshotInfo{dbPath='%s', id='%s', generation=%d}", + versionSstFiles.keySet().stream().collect(toMap(identity(), dbPathFunction::apply)), id, generation); } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index 1023efdacc3c..ff0eace3a02d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -18,6 +18,7 @@ package org.apache.ozone.rocksdiff; import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.function.Function.identity; import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT; @@ -32,7 +33,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; import com.google.common.graph.MutableGraph; import com.google.protobuf.InvalidProtocolBufferException; import java.io.BufferedWriter; @@ -73,6 +73,7 @@ import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; @@ -84,7 +85,7 @@ import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; -import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdb.util.SstFileInfo; import org.rocksdb.AbstractEventListener; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.CompactionJobInfo; @@ -152,7 +153,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable, * Used to trim the file extension when writing compaction entries to the log * to save space. */ - static final String SST_FILE_EXTENSION = ".sst"; + public static final String SST_FILE_EXTENSION = ".sst"; public static final int SST_FILE_EXTENSION_LENGTH = SST_FILE_EXTENSION.length(); static final String PRUNED_SST_FILE_TEMP = "pruned.sst.tmp"; @@ -591,56 +592,6 @@ private void createLink(Path link, Path source) { } } - /** - * Helper method to trim the filename retrieved from LiveFileMetaData. - */ - private String trimSSTFilename(String filename) { - if (!filename.startsWith("/")) { - final String errorMsg = String.format( - "Invalid start of filename: '%s'. Expected '/'", filename); - LOG.error(errorMsg); - throw new RuntimeException(errorMsg); - } - if (!filename.endsWith(SST_FILE_EXTENSION)) { - final String errorMsg = String.format( - "Invalid extension of file: '%s'. 
Expected '%s'", - filename, SST_FILE_EXTENSION_LENGTH); - LOG.error(errorMsg); - throw new RuntimeException(errorMsg); - } - return filename.substring("/".length(), - filename.length() - SST_FILE_EXTENSION_LENGTH); - } - - /** - * Read the current Live manifest for a given RocksDB instance (Active or - * Checkpoint). - * @param rocksDB open rocksDB instance. - * @return a list of SST files (without extension) in the DB. - */ - public Set readRocksDBLiveFiles(ManagedRocksDB rocksDB, Set tableFilter) { - HashSet liveFiles = new HashSet<>(); - - final Set cfs = Sets.newHashSet( - org.apache.hadoop.hdds.StringUtils.bytes2String( - RocksDB.DEFAULT_COLUMN_FAMILY), "keyTable", "directoryTable", - "fileTable"); - // Note it retrieves only the selected column families by the descriptor - // i.e. keyTable, directoryTable, fileTable - List liveFileMetaDataList = - RdbUtil.getLiveSSTFilesForCFs(rocksDB, cfs); - LOG.debug("SST File Metadata for DB: " + rocksDB.get().getName()); - for (LiveFileMetaData m : liveFileMetaDataList) { - if (!tableFilter.contains(StringUtils.bytes2String(m.columnFamilyName()))) { - continue; - } - LOG.debug("File: {}, Level: {}", m.fileName(), m.level()); - final String trimmedFilename = trimSSTFilename(m.fileName()); - liveFiles.add(trimmedFilename); - } - return liveFiles; - } - /** * Process log line of compaction log text file input and populate the DAG. * It also adds the compaction log entry to compaction log table. @@ -784,12 +735,10 @@ private void preconditionChecksForLoadAllCompactionLogs() { * exist in backup directory before being involved in compactions), * and appends the extension '.sst'. */ - private String getSSTFullPath(String sstFilenameWithoutExtension, - String... dbPaths) { + private String getSSTFullPath(String sstFilenameWithoutExtension, Path... dbPaths) { // Try to locate the SST in the backup dir first - final Path sstPathInBackupDir = Paths.get(sstBackupDir, - sstFilenameWithoutExtension + SST_FILE_EXTENSION); + final Path sstPathInBackupDir = Paths.get(sstBackupDir, sstFilenameWithoutExtension + SST_FILE_EXTENSION); if (Files.exists(sstPathInBackupDir)) { return sstPathInBackupDir.toString(); } @@ -797,17 +746,15 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, // SST file does not exist in the SST backup dir, this means the SST file // has not gone through any compactions yet and is only available in the // src DB directory or destDB directory - for (String dbPath : dbPaths) { - final Path sstPathInDBDir = Paths.get(dbPath, - sstFilenameWithoutExtension + SST_FILE_EXTENSION); + for (Path dbPath : dbPaths) { + final Path sstPathInDBDir = dbPath.resolve(sstFilenameWithoutExtension + SST_FILE_EXTENSION); if (Files.exists(sstPathInDBDir)) { return sstPathInDBDir.toString(); } } // TODO: More graceful error handling? 
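+ // Reaching this point means the SST exists neither in the backup dir nor in any of the supplied DB dirs.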
- throw new RuntimeException("Unable to locate SST file: " + - sstFilenameWithoutExtension); + throw new RuntimeException("Unable to locate SST file: " + sstFilenameWithoutExtension); } /** @@ -824,22 +771,29 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, * "/path/to/sstBackupDir/000060.sst"] */ public synchronized Optional> getSSTDiffListWithFullPath(DifferSnapshotInfo src, - DifferSnapshotInfo dest, Set tablesToLookup, - String sstFilesDirForSnapDiffJob) { + DifferSnapshotInfo dest, Map versionMap, TablePrefixInfo prefixInfo, + Set tablesToLookup, String sstFilesDirForSnapDiffJob) throws IOException { + int srcVersion = src.getMaxVersion(); + if (!versionMap.containsKey(srcVersion)) { + throw new IOException("No dest version corresponding to srcVersion: " + srcVersion + " in " + + "versionMap: " + versionMap); + } + int destVersion = versionMap.get(srcVersion); + DifferSnapshotVersion srcSnapshotVersion = new DifferSnapshotVersion(src, srcVersion, tablesToLookup); + DifferSnapshotVersion destSnapshotVersion = new DifferSnapshotVersion(dest, destVersion, tablesToLookup); - Optional> sstDiffList = getSSTDiffList(src, dest, tablesToLookup); + Optional> sstDiffList = getSSTDiffList(srcSnapshotVersion, destSnapshotVersion, prefixInfo, + tablesToLookup, srcVersion == 0); return sstDiffList.map(diffList -> diffList.stream() - .map( - sst -> { - String sstFullPath = getSSTFullPath(sst, src.getDbPath(), dest.getDbPath()); - Path link = Paths.get(sstFilesDirForSnapDiffJob, - sst + SST_FILE_EXTENSION); - Path srcFile = Paths.get(sstFullPath); - createLink(link, srcFile); - return link.toString(); - }) - .collect(Collectors.toList())); + .map(sst -> { + String sstFullPath = getSSTFullPath(sst.getFileName(), srcSnapshotVersion.getDbPath(), + destSnapshotVersion.getDbPath()); + Path link = sst.getFilePath(Paths.get(sstFilesDirForSnapDiffJob)); + Path srcFile = Paths.get(sstFullPath); + createLink(link, srcFile); + return link.toString(); + }).collect(Collectors.toList())); } /** @@ -853,53 +807,109 @@ public synchronized Optional> getSSTDiffListWithFullPath(DifferSnap * @param dest destination snapshot * @return A list of SST files without extension. e.g. ["000050", "000060"] */ - public synchronized Optional> getSSTDiffList(DifferSnapshotInfo src, - DifferSnapshotInfo dest, Set tablesToLookup) { + public synchronized Optional> getSSTDiffList(DifferSnapshotVersion src, + DifferSnapshotVersion dest, TablePrefixInfo prefixInfo, Set tablesToLookup, boolean useCompactionDag) { // TODO: Reject or swap if dest is taken after src, once snapshot chain // integration is done. 
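+ // The compaction DAG is only consulted when useCompactionDag is true (the caller passes srcVersion == 0); + // otherwise the diff falls back to a plain set comparison of the two snapshots' SST file lists.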
- Set srcSnapFiles = readRocksDBLiveFiles(src.getRocksDB(), tablesToLookup); - Set destSnapFiles = readRocksDBLiveFiles(dest.getRocksDB(), tablesToLookup); - - Set fwdDAGSameFiles = new HashSet<>(); - Set fwdDAGDifferentFiles = new HashSet<>(); - - LOG.debug("Doing forward diff from src '{}' to dest '{}'", - src.getDbPath(), dest.getDbPath()); - internalGetSSTDiffList(src, dest, srcSnapFiles, destSnapFiles, - fwdDAGSameFiles, fwdDAGDifferentFiles); + Map fwdDAGSameFiles = new HashMap<>(); + Map fwdDAGDifferentFiles = new HashMap<>(); + if (useCompactionDag) { + LOG.debug("Doing forward diff from src '{}' to dest '{}'", src.getDbPath(), dest.getDbPath()); + internalGetSSTDiffList(src, dest, fwdDAGSameFiles, fwdDAGDifferentFiles); + } else { + Set srcSstFileInfos = new HashSet<>(src.getSstFileMap().values()); + Set destSstFileInfos = new HashSet<>(dest.getSstFileMap().values()); + for (SstFileInfo srcSstFileInfo : srcSstFileInfos) { + if (destSstFileInfos.contains(srcSstFileInfo)) { + fwdDAGSameFiles.put(srcSstFileInfo.getFileName(), srcSstFileInfo); + } else { + fwdDAGDifferentFiles.put(srcSstFileInfo.getFileName(), srcSstFileInfo); + } + } + for (SstFileInfo destSstFileInfo : destSstFileInfos) { + if (srcSstFileInfos.contains(destSstFileInfo)) { + fwdDAGSameFiles.put(destSstFileInfo.getFileName(), destSstFileInfo); + } else { + fwdDAGDifferentFiles.put(destSstFileInfo.getFileName(), destSstFileInfo); + } + } + } if (LOG.isDebugEnabled()) { LOG.debug("Result of diff from src '" + src.getDbPath() + "' to dest '" + dest.getDbPath() + "':"); StringBuilder logSB = new StringBuilder(); logSB.append("Fwd DAG same SST files: "); - for (String file : fwdDAGSameFiles) { + for (String file : fwdDAGSameFiles.keySet()) { logSB.append(file).append(SPACE_DELIMITER); } LOG.debug(logSB.toString()); logSB.setLength(0); logSB.append("Fwd DAG different SST files: "); - for (String file : fwdDAGDifferentFiles) { + for (String file : fwdDAGDifferentFiles.keySet()) { logSB.append(file).append(SPACE_DELIMITER); } LOG.debug("{}", logSB); } // Check if the DAG traversal was able to reach all the destination SST files. - for (String destSnapFile : destSnapFiles) { - if (!fwdDAGSameFiles.contains(destSnapFile) && !fwdDAGDifferentFiles.contains(destSnapFile)) { + for (String destSnapFile : dest.getSstFiles()) { + if (!fwdDAGSameFiles.containsKey(destSnapFile) && !fwdDAGDifferentFiles.containsKey(destSnapFile)) { return Optional.empty(); } } - if (src.getTablePrefixes() != null && src.getTablePrefixes().size() != 0) { - RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, src.getTablePrefixes(), - compactionDag.getCompactionMap(), tablesToLookup, src.getRocksDB(), dest.getRocksDB()); + if (prefixInfo != null && prefixInfo.size() != 0) { + RocksDiffUtils.filterRelevantSstFiles(fwdDAGDifferentFiles, tablesToLookup, prefixInfo); + } + return Optional.of(new ArrayList<>(fwdDAGDifferentFiles.values())); + } + + /** + * This class represents a version of a snapshot in a database differ operation. + * It contains metadata associated with a specific snapshot version, including + * SST file information, generation id, and the database path for the given version. + * + * Designed to work with `DifferSnapshotInfo`, this class allows the retrieval of + * snapshot-related metadata and facilitates mapping of SST files for version comparison + * and other operations. + * + * The core functionality is to store and provide read-only access to: + * - SST file information for a specified snapshot version. 
+ * - Snapshot generation identifier. + * - Path to the database directory corresponding to the snapshot version. + */ + public static class DifferSnapshotVersion { + private Map sstFiles; + private long generation; + private Path dbPath; + + public DifferSnapshotVersion(DifferSnapshotInfo differSnapshotInfo, int version, + Set tablesToLookup) { + this.sstFiles = differSnapshotInfo.getSstFiles(version, tablesToLookup) + .stream().collect(Collectors.toMap(SstFileInfo::getFileName, identity())); + this.generation = differSnapshotInfo.getGeneration(); + this.dbPath = differSnapshotInfo.getDbPath(version); + } + + private Path getDbPath() { + return dbPath; + } + + private long getGeneration() { + return generation; + } + + private Set getSstFiles() { + return sstFiles.keySet(); + } + + private Map getSstFileMap() { + return Collections.unmodifiableMap(sstFiles); } - return Optional.of(new ArrayList<>(fwdDAGDifferentFiles)); } /** @@ -911,30 +921,26 @@ public synchronized Optional> getSSTDiffList(DifferSnapshotInfo src * diffing). Otherwise, add it to the differentFiles map, as it will * need further diffing. */ - synchronized void internalGetSSTDiffList( - DifferSnapshotInfo src, - DifferSnapshotInfo dest, - Set srcSnapFiles, - Set destSnapFiles, - Set sameFiles, - Set differentFiles) { + synchronized void internalGetSSTDiffList(DifferSnapshotVersion src, DifferSnapshotVersion dest, + Map sameFiles, Map differentFiles) { Preconditions.checkArgument(sameFiles.isEmpty(), "Set must be empty"); Preconditions.checkArgument(differentFiles.isEmpty(), "Set must be empty"); - - for (String fileName : srcSnapFiles) { - if (destSnapFiles.contains(fileName)) { + Map destSnapFiles = dest.getSstFileMap(); + for (Map.Entry sstFileEntry : src.getSstFileMap().entrySet()) { + String fileName = sstFileEntry.getKey(); + SstFileInfo sstFileInfo = sstFileEntry.getValue(); + if (destSnapFiles.containsKey(fileName)) { LOG.debug("Source '{}' and destination '{}' share the same SST '{}'", src.getDbPath(), dest.getDbPath(), fileName); - sameFiles.add(fileName); + sameFiles.put(fileName, sstFileInfo); continue; } CompactionNode infileNode = compactionDag.getCompactionNode(fileName); if (infileNode == null) { - LOG.debug("Source '{}' SST file '{}' is never compacted", - src.getDbPath(), fileName); - differentFiles.add(fileName); + LOG.debug("Source '{}' SST file '{}' is never compacted", src.getDbPath(), fileName); + differentFiles.put(fileName, sstFileInfo); continue; } @@ -944,15 +950,12 @@ synchronized void internalGetSSTDiffList( // Traversal level/depth indicator for debug print int level = 1; while (!currentLevel.isEmpty()) { - LOG.debug("Traversal level: {}. Current level has {} nodes.", - level++, currentLevel.size()); + LOG.debug("Traversal level: {}. Current level has {} nodes.", level++, currentLevel.size()); if (level >= 1000000) { - final String errorMsg = String.format( - "Graph traversal level exceeded allowed maximum (%d). " - + "This could be due to invalid input generating a " - + "loop in the traversal path. Same SSTs found so far: %s, " - + "different SSTs: %s", level, sameFiles, differentFiles); + final String errorMsg = String.format("Graph traversal level exceeded allowed maximum (%d). " + + "This could be due to invalid input generating a loop in the traversal path. Same SSTs found so " + + "far: %s, different SSTs: %s", level, sameFiles, differentFiles); LOG.error(errorMsg); // Clear output in case of error. 
Expect fall back to full diff sameFiles.clear(); @@ -964,43 +967,42 @@ synchronized void internalGetSSTDiffList( final Set nextLevel = new HashSet<>(); for (CompactionNode current : currentLevel) { LOG.debug("Processing node: '{}'", current.getFileName()); - if (current.getSnapshotGeneration() < dest.getSnapshotGeneration()) { + if (current.getSnapshotGeneration() < dest.getGeneration()) { LOG.debug("Current node's snapshot generation '{}' " + "reached destination snapshot's '{}'. " + "Src '{}' and dest '{}' have different SST file: '{}'", - current.getSnapshotGeneration(), dest.getSnapshotGeneration(), + current.getSnapshotGeneration(), dest.getGeneration(), src.getDbPath(), dest.getDbPath(), current.getFileName()); - differentFiles.add(current.getFileName()); + differentFiles.put(current.getFileName(), current); continue; } Set successors = compactionDag.getForwardCompactionDAG().successors(current); if (successors.isEmpty()) { - LOG.debug("No further compaction happened to the current file. " + - "Src '{}' and dest '{}' have different file: {}", - src.getDbPath(), dest.getDbPath(), current.getFileName()); - differentFiles.add(current.getFileName()); + LOG.debug("No further compaction happened to the current file. Src '{}' and dest '{}' " + + "have different file: {}", src.getDbPath(), dest.getDbPath(), current.getFileName()); + differentFiles.put(current.getFileName(), current); continue; } for (CompactionNode nextNode : successors) { - if (sameFiles.contains(nextNode.getFileName()) || - differentFiles.contains(nextNode.getFileName())) { + if (sameFiles.containsKey(nextNode.getFileName()) || + differentFiles.containsKey(nextNode.getFileName())) { LOG.debug("Skipping known processed SST: {}", nextNode.getFileName()); continue; } - if (destSnapFiles.contains(nextNode.getFileName())) { - LOG.debug("Src '{}' and dest '{}' have the same SST: {}", - src.getDbPath(), dest.getDbPath(), nextNode.getFileName()); - sameFiles.add(nextNode.getFileName()); + if (destSnapFiles.containsKey(nextNode.getFileName())) { + LOG.debug("Src '{}' and dest '{}' have the same SST: {}", src.getDbPath(), dest.getDbPath(), + nextNode.getFileName()); + sameFiles.put(nextNode.getFileName(), destSnapFiles.get(nextNode.getFileName())); continue; } // Queue different SST to the next level - LOG.debug("Src '{}' and dest '{}' have a different SST: {}", - src.getDbPath(), dest.getDbPath(), nextNode.getFileName()); + LOG.debug("Src '{}' and dest '{}' have a different SST: {}", src.getDbPath(), dest.getDbPath(), + nextNode.getFileName()); nextLevel.add(nextNode); } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java index 7d9512768bc1..1e4100c66679 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java @@ -20,17 +20,11 @@ import static org.apache.hadoop.hdds.StringUtils.getFirstNChars; import com.google.common.annotations.VisibleForTesting; -import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; -import org.apache.commons.io.FilenameUtils; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.ozone.compaction.log.CompactionFileInfo; import 
org.apache.ozone.rocksdb.util.SstFileInfo; -import org.rocksdb.LiveFileMetaData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,40 +48,32 @@ public static boolean isKeyWithPrefixPresent(String prefixForColumnFamily, && prefixForColumnFamily.compareTo(endKeyPrefix) <= 0; } - public static void filterRelevantSstFiles(Set inputFiles, - TablePrefixInfo tablePrefixInfo, - Set columnFamiliesToLookup, - ManagedRocksDB... dbs) { - filterRelevantSstFiles(inputFiles, tablePrefixInfo, Collections.emptyMap(), columnFamiliesToLookup, dbs); + /** + * Filter sst files based on prefixes. + */ + public static Map filterRelevantSstFiles(Map inputFiles, + Set tablesToLookup, TablePrefixInfo tablePrefixInfo) { + for (Iterator> fileIterator = inputFiles.entrySet().iterator(); fileIterator.hasNext();) { + SstFileInfo sstFileInfo = fileIterator.next().getValue(); + if (shouldSkipNode(sstFileInfo, tablePrefixInfo, tablesToLookup)) { + fileIterator.remove(); + } + } + return inputFiles; } /** * Filter sst files based on prefixes. */ - public static void filterRelevantSstFiles(Set inputFiles, - TablePrefixInfo tablePrefixInfo, - Map preExistingCompactionNodes, - Set columnFamiliesToLookup, - ManagedRocksDB... dbs) { - Map liveFileMetaDataMap = new HashMap<>(); - int dbIdx = 0; - for (Iterator fileIterator = - inputFiles.iterator(); fileIterator.hasNext();) { - String filename = FilenameUtils.getBaseName(fileIterator.next()); - while (!preExistingCompactionNodes.containsKey(filename) && !liveFileMetaDataMap.containsKey(filename) - && dbIdx < dbs.length) { - liveFileMetaDataMap.putAll(dbs[dbIdx].getLiveMetadataForSSTFiles()); - dbIdx += 1; - } - CompactionNode compactionNode = preExistingCompactionNodes.get(filename); - if (compactionNode == null) { - compactionNode = new CompactionNode(new CompactionFileInfo.Builder(filename) - .setValues(liveFileMetaDataMap.get(filename)).build()); - } - if (shouldSkipNode(compactionNode, tablePrefixInfo, columnFamiliesToLookup)) { + public static Set filterRelevantSstFiles(Set inputFiles, + Set tablesToLookup, TablePrefixInfo tablePrefixInfo) { + for (Iterator fileIterator = inputFiles.iterator(); fileIterator.hasNext();) { + SstFileInfo sstFileInfo = fileIterator.next(); + if (shouldSkipNode(sstFileInfo, tablePrefixInfo, tablesToLookup)) { fileIterator.remove(); } } + return inputFiles; } @VisibleForTesting diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index c59f6aeb491f..91c5fd070273 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -41,7 +41,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -67,6 +69,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.ConcurrentMap; import 
java.util.concurrent.ExecutionException; @@ -76,10 +79,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Consumer; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; -import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; @@ -105,13 +106,15 @@ import org.apache.hadoop.util.Time; import org.apache.ozone.compaction.log.CompactionFileInfo; import org.apache.ozone.compaction.log.CompactionLogEntry; -import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DifferSnapshotVersion; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.NodeComparator; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; @@ -131,6 +134,8 @@ * Test RocksDBCheckpointDiffer basic functionality. */ public class TestRocksDBCheckpointDiffer { + @TempDir + private static File dbDir; private static final Logger LOG = LoggerFactory.getLogger(TestRocksDBCheckpointDiffer.class); @@ -279,8 +284,6 @@ public class TestRocksDBCheckpointDiffer { private final List cpDirList = new ArrayList<>(); - private final List> colHandles = new ArrayList<>(); - private static final String ACTIVE_DB_DIR_NAME = "./rocksdb-data"; private static final String METADATA_DIR_NAME = "./metadata"; private static final String COMPACTION_LOG_DIR_NAME = "compaction-log"; @@ -408,7 +411,7 @@ public void cleanUp() { } } - private static List getPrunedCompactionEntries(boolean prune, Map metadata) { + private static List getPrunedCompactionEntries(boolean prune, Map metadata) { List entries = new ArrayList<>(); if (!prune) { entries.add(createCompactionEntry(1, @@ -431,6 +434,13 @@ private static List getPrunedCompactionEntries(boolean prune return entries; } + private static DifferSnapshotInfo mockDifferSnapshotVersion(String dbPath, long generation) { + DifferSnapshotInfo differSnapshotInfo = mock(DifferSnapshotInfo.class); + when(differSnapshotInfo.getDbPath(anyInt())).thenReturn(Paths.get(dbPath)); + when(differSnapshotInfo.getGeneration()).thenReturn(generation); + return differSnapshotInfo; + } + /** * Test cases for testGetSSTDiffListWithoutDB. 
*/ @@ -504,21 +514,15 @@ private static Stream casesGetSSTDiffListWithoutDB() { Arrays.asList("000105", "000095", "000088"), Collections.singletonList("000107")) ); - - DifferSnapshotInfo snapshotInfo1 = new DifferSnapshotInfo( - "/path/to/dbcp1", UUID.randomUUID(), 3008L, null, Mockito.mock(ManagedRocksDB.class)); - DifferSnapshotInfo snapshotInfo2 = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 14980L, null, Mockito.mock(ManagedRocksDB.class)); - DifferSnapshotInfo snapshotInfo3 = new DifferSnapshotInfo( - "/path/to/dbcp3", UUID.randomUUID(), 17975L, null, Mockito.mock(ManagedRocksDB.class)); - DifferSnapshotInfo snapshotInfo4 = new DifferSnapshotInfo( - "/path/to/dbcp4", UUID.randomUUID(), 18000L, null, Mockito.mock(ManagedRocksDB.class)); + Path baseDir = dbDir.toPath().resolve("path").resolve("to").toAbsolutePath(); + DifferSnapshotInfo snapshotInfo1 = mockDifferSnapshotVersion(baseDir.resolve("dbcp1").toString(), 3008L); + DifferSnapshotInfo snapshotInfo2 = mockDifferSnapshotVersion(baseDir.resolve("dbcp2").toString(), 14980L); + DifferSnapshotInfo snapshotInfo3 = mockDifferSnapshotVersion(baseDir.resolve("dbcp3").toString(), 17975L); + DifferSnapshotInfo snapshotInfo4 = mockDifferSnapshotVersion(baseDir.resolve("dbcp4").toString(), 18000L); TablePrefixInfo prefixMap = new TablePrefixInfo(ImmutableMap.of("col1", "c", "col2", "d")); - DifferSnapshotInfo snapshotInfo5 = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 0L, prefixMap, Mockito.mock(ManagedRocksDB.class)); - DifferSnapshotInfo snapshotInfo6 = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 100L, prefixMap, Mockito.mock(ManagedRocksDB.class)); + DifferSnapshotInfo snapshotInfo5 = mockDifferSnapshotVersion(baseDir.resolve("dbcp2").toString(), 0L); + DifferSnapshotInfo snapshotInfo6 = mockDifferSnapshotVersion(baseDir.resolve("dbcp2").toString(), 100L); Set snapshotSstFiles1 = ImmutableSet.of("000059", "000053"); Set snapshotSstFiles2 = ImmutableSet.of("000088", "000059", @@ -550,7 +554,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { "000095"), ImmutableSet.of("000066", "000105", "000080", "000087", "000073", "000095"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 2: Compaction log file crafted input: " + "One source ('to' snapshot) SST file is never compacted " + "(newly flushed)", @@ -563,7 +567,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000088", "000105", "000059", "000053", "000095"), ImmutableSet.of("000108"), ImmutableSet.of("000108"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 3: Compaction log file crafted input: " + "Same SST files found during SST expansion", compactionLog, @@ -575,7 +579,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000066", "000059", "000053"), ImmutableSet.of("000080", "000087", "000073", "000095"), ImmutableSet.of("000080", "000087", "000073", "000095"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 4: Compaction log file crafted input: " + "Skipping known processed SST.", compactionLog, @@ -587,7 +591,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), - true, Collections.emptyMap()), + true, Collections.emptyMap(), null), Arguments.of("Test 5: Compaction log file hit snapshot" + " generation early exit condition", compactionLog, @@ -599,7 +603,7 @@ 
private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 6: Compaction log table regular case. " + "Expands expandable SSTs in the initial diff.", null, @@ -613,7 +617,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { "000095"), ImmutableSet.of("000066", "000105", "000080", "000087", "000073", "000095"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 7: Compaction log table crafted input: " + "One source ('to' snapshot) SST file is never compacted " + "(newly flushed)", @@ -626,7 +630,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000088", "000105", "000059", "000053", "000095"), ImmutableSet.of("000108"), ImmutableSet.of("000108"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 8: Compaction log table crafted input: " + "Same SST files found during SST expansion", null, @@ -638,7 +642,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000066", "000059", "000053"), ImmutableSet.of("000080", "000087", "000073", "000095"), ImmutableSet.of("000080", "000087", "000073", "000095"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 9: Compaction log table crafted input: " + "Skipping known processed SST.", null, @@ -650,7 +654,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), - true, Collections.emptyMap()), + true, Collections.emptyMap(), null), Arguments.of("Test 10: Compaction log table hit snapshot " + "generation early exit condition", null, @@ -662,7 +666,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("000059", "000053"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), ImmutableSet.of("000066", "000080", "000087", "000073", "000062"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), null), Arguments.of("Test 11: Older Compaction log got pruned and source snapshot delta files would be " + "unreachable", null, @@ -674,7 +678,7 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("1", "3", "13", "14"), ImmutableSet.of("2", "8", "9", "12"), ImmutableSet.of("2", "8", "9", "12"), - false, Collections.emptyMap()), + false, Collections.emptyMap(), prefixMap), Arguments.of("Test 12: Older Compaction log got pruned and source snapshot delta files would be " + "unreachable", null, @@ -686,22 +690,22 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("3", "13", "14"), ImmutableSet.of("4", "5", "8", "9", "12"), null, - false, Collections.emptyMap()), + false, Collections.emptyMap(), prefixMap), Arguments.of("Test 13: Compaction log to test filtering logic based on range and column family", null, getPrunedCompactionEntries(false, - new HashMap() {{ - put("1", new String[]{"a", "c", "col1"}); - put("3", new String[]{"a", "d", "col2"}); - put("13", new String[]{"a", "c", "col13"}); - put("14", new String[]{"a", "c", "col1"}); - put("2", new String[]{"a", "c", "col1"}); - put("4", new String[]{"a", "b", "col1"}); - put("5", new String[]{"b", "b", "col1"}); - put("10", new String[]{"a", "b", "col1"}); - put("8", new String[]{"a", "b", "col1"}); - put("6", new 
String[]{"a", "z", "col13"}); - put("7", new String[]{"a", "z", "col13"}); + new HashMap() {{ + put("1", new SstFileInfo("1", "a", "c", "col1")); + put("3", new SstFileInfo("3", "a", "d", "col2")); + put("13", new SstFileInfo("13", "a", "c", "col13")); + put("14", new SstFileInfo("14", "a", "c", "col1")); + put("2", new SstFileInfo("2", "a", "c", "col1")); + put("4", new SstFileInfo("4", "a", "b", "col1")); + put("5", new SstFileInfo("5", "b", "b", "col1")); + put("10", new SstFileInfo("10", "a", "b", "col1")); + put("8", new SstFileInfo("8", "a", "b", "col1")); + put("6", new SstFileInfo("6", "a", "z", "col13")); + put("7", new SstFileInfo("7", "a", "z", "col13")); }}), snapshotInfo6, snapshotInfo5, @@ -712,12 +716,12 @@ private static Stream casesGetSSTDiffListWithoutDB() { ImmutableSet.of("2", "9", "12"), false, ImmutableMap.of( - "2", new String[]{"a", "b", "col1"}, - "12", new String[]{"a", "d", "col2"}, - "8", new String[]{"a", "b", "col1"}, - "9", new String[]{"a", "c", "col1"}, - "15", new String[]{"a", "z", "col13"} - )) + "2", new SstFileInfo("2", "a", "b", "col1"), + "12", new SstFileInfo("12", "a", "d", "col2"), + "8", new SstFileInfo("8", "a", "b", "col1"), + "9", new SstFileInfo("9", "a", "c", "col1"), + "15", new SstFileInfo("15", "a", "z", "col13") + ), prefixMap) ); } @@ -740,7 +744,8 @@ public void testGetSSTDiffListWithoutDB(String description, Set expectedDiffSstFiles, Set expectedSSTDiffFiles, boolean expectingException, - Map metaDataMap) { + Map metaDataMap, + TablePrefixInfo prefixInfo) { boolean exceptionThrown = false; if (compactionLog != null) { @@ -756,15 +761,33 @@ public void testGetSSTDiffListWithoutDB(String description, } rocksDBCheckpointDiffer.loadAllCompactionLogs(); - Set actualSameSstFiles = new HashSet<>(); - Set actualDiffSstFiles = new HashSet<>(); + Set tablesToLookup; + String dummyTable; + if (prefixInfo != null) { + tablesToLookup = prefixInfo.getTableNames(); + dummyTable = tablesToLookup.stream().findAny().get(); + } else { + tablesToLookup = mock(Set.class); + when(tablesToLookup.contains(anyString())).thenReturn(true); + dummyTable = "dummy"; + } + Map actualSameSstFiles = new HashMap<>(); + Map actualDiffSstFiles = new HashMap<>(); + List sourceSnapshotFiles = srcSnapshotSstFiles.stream() + .map(fileName -> new SstFileInfo(fileName, "", "", dummyTable)) + .collect(Collectors.toList()); + List destSnapshotFiles = destSnapshotSstFiles.stream() + .map(fileName -> new SstFileInfo(fileName, "", "", dummyTable)) + .collect(Collectors.toList()); + when(srcSnapshot.getSstFiles(eq(0), eq(tablesToLookup))).thenReturn(sourceSnapshotFiles); + when(destSnapshot.getSstFiles(eq(0), eq(tablesToLookup))).thenReturn(destSnapshotFiles); + DifferSnapshotVersion srcVersion = new DifferSnapshotVersion(srcSnapshot, 0, tablesToLookup); + DifferSnapshotVersion destVersion = new DifferSnapshotVersion(destSnapshot, 0, tablesToLookup); try { rocksDBCheckpointDiffer.internalGetSSTDiffList( - srcSnapshot, - destSnapshot, - srcSnapshotSstFiles, - destSnapshotSstFiles, + srcVersion, + destVersion, actualSameSstFiles, actualDiffSstFiles); } catch (RuntimeException rtEx) { @@ -780,57 +803,31 @@ public void testGetSSTDiffListWithoutDB(String description, } // Check same and different SST files result - assertEquals(expectedSameSstFiles, actualSameSstFiles); - assertEquals(expectedDiffSstFiles, actualDiffSstFiles); - try (MockedStatic mockedHandler = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS)) { - RocksDB rocksDB = Mockito.mock(RocksDB.class); - 
Mockito.when(rocksDB.getName()).thenReturn("dummy"); - Mockito.when(srcSnapshot.getRocksDB().get()).thenReturn(rocksDB); - Mockito.when(destSnapshot.getRocksDB().get()).thenReturn(rocksDB); - Mockito.when(srcSnapshot.getRocksDB().getLiveMetadataForSSTFiles()) - .thenAnswer(invocation -> srcSnapshotSstFiles.stream().filter(metaDataMap::containsKey).map(file -> { - LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); - String[] metaData = metaDataMap.get(file); - Mockito.when(liveFileMetaData.fileName()).thenReturn("/" + file + SST_FILE_EXTENSION); - Mockito.when(liveFileMetaData.smallestKey()).thenReturn(metaData[0].getBytes(UTF_8)); - Mockito.when(liveFileMetaData.largestKey()).thenReturn(metaData[1].getBytes(UTF_8)); - Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(metaData[2].getBytes(UTF_8)); - return liveFileMetaData; - }).collect(Collectors.toMap(liveFileMetaData -> FilenameUtils.getBaseName(liveFileMetaData.fileName()), - Function.identity()))); - Set tablesToLookup; - String dummyTable; - if (srcSnapshot.getTablePrefixes() != null) { - tablesToLookup = srcSnapshot.getTablePrefixes().getTableNames(); - dummyTable = tablesToLookup.stream().findAny().get(); + assertEquals(expectedSameSstFiles, actualSameSstFiles.keySet()); + assertEquals(expectedDiffSstFiles, actualDiffSstFiles.keySet()); + when(srcSnapshot.getSstFiles(eq(0), eq(tablesToLookup))) + .thenAnswer(invocation -> srcSnapshotSstFiles.stream() + .map(file -> metaDataMap.getOrDefault(file, new SstFileInfo(file, null, null, null))) + .collect(Collectors.toList())); + when(destSnapshot.getSstFiles(eq(0), eq(tablesToLookup))) + .thenAnswer(invocation -> destSnapshotSstFiles.stream() + .map(file -> metaDataMap.getOrDefault(file, new SstFileInfo(file, null, null, null))) + .collect(Collectors.toList())); + + try { + Assertions.assertEquals(Optional.ofNullable(expectedSSTDiffFiles) + .map(files -> files.stream().sorted().collect(Collectors.toList())).orElse(null), + rocksDBCheckpointDiffer.getSSTDiffList( + new DifferSnapshotVersion(srcSnapshot, 0, tablesToLookup), + new DifferSnapshotVersion(destSnapshot, 0, tablesToLookup), prefixInfo, tablesToLookup, + true) + .map(i -> i.stream().map(SstFileInfo::getFileName).sorted().collect(Collectors.toList())).orElse(null)); + } catch (RuntimeException rtEx) { + if (!expectingException) { + rtEx.printStackTrace(); + fail("Unexpected exception thrown in test."); } else { - tablesToLookup = mock(Set.class); - when(tablesToLookup.contains(anyString())).thenReturn(true); - dummyTable = "dummy"; - } - mockedHandler.when(() -> RdbUtil.getLiveSSTFilesForCFs(any(), any())) - .thenAnswer(i -> { - Set sstFiles = i.getArgument(0).equals(srcSnapshot.getRocksDB()) ? 
srcSnapshotSstFiles - : destSnapshotSstFiles; - return sstFiles.stream().map(fileName -> { - LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); - when(liveFileMetaData.fileName()).thenReturn("/" + fileName + SST_FILE_EXTENSION); - when(liveFileMetaData.columnFamilyName()).thenReturn(dummyTable.getBytes(UTF_8)); - return liveFileMetaData; - }).collect(Collectors.toList()); - }); - try { - Assertions.assertEquals(Optional.ofNullable(expectedSSTDiffFiles) - .map(files -> files.stream().sorted().collect(Collectors.toList())).orElse(null), - rocksDBCheckpointDiffer.getSSTDiffList(srcSnapshot, destSnapshot, tablesToLookup) - .map(i -> i.stream().sorted().collect(Collectors.toList())).orElse(null)); - } catch (RuntimeException rtEx) { - if (!expectingException) { - rtEx.printStackTrace(); - fail("Unexpected exception thrown in test."); - } else { - exceptionThrown = true; - } + exceptionThrown = true; } } if (expectingException && !exceptionThrown) { @@ -878,19 +875,6 @@ void testDifferWithDB() throws Exception { if (LOG.isDebugEnabled()) { rocksDBCheckpointDiffer.dumpCompactionNodeTable(); } - - cleanUpSnapshots(); - } - - public void cleanUpSnapshots() { - for (DifferSnapshotInfo snap : snapshots) { - snap.getRocksDB().close(); - } - for (List colHandle : colHandles) { - for (ColumnFamilyHandle handle : colHandle) { - handle.close(); - } - } } private static List getColumnFamilyDescriptors() { @@ -941,18 +925,21 @@ void diffAllSnapshots(RocksDBCheckpointDiffer differ) if (rocksDBCheckpointDiffer.getCompactionNodeMap().containsKey(diffFile)) { columnFamily = rocksDBCheckpointDiffer.getCompactionNodeMap().get(diffFile).getColumnFamily(); } else { - columnFamily = bytes2String(src.getRocksDB().getLiveMetadataForSSTFiles().get(diffFile).columnFamilyName()); + columnFamily = src.getSstFile(0, diffFile).getColumnFamily(); } if (columnFamily == null || tableToLookUp.contains(columnFamily)) { expectedDiffFiles.add(diffFile); } } - List sstDiffList = differ.getSSTDiffList(src, snap, tableToLookUp).orElse(Collections.emptyList()); + DifferSnapshotVersion srcSnapVersion = new DifferSnapshotVersion(src, 0, tableToLookUp); + DifferSnapshotVersion destSnapVersion = new DifferSnapshotVersion(snap, 0, tableToLookUp); + List sstDiffList = differ.getSSTDiffList(srcSnapVersion, destSnapVersion, null, + tableToLookUp, true).orElse(Collections.emptyList()); LOG.info("SST diff list from '{}' to '{}': {} tables: {}", - src.getDbPath(), snap.getDbPath(), sstDiffList, tableToLookUp); - - assertEquals(expectedDiffFiles, sstDiffList); + src.getDbPath(0), snap.getDbPath(0), sstDiffList, tableToLookUp); + assertEquals(expectedDiffFiles, sstDiffList.stream().map(SstFileInfo::getFileName) + .collect(Collectors.toList())); } ++index; @@ -980,12 +967,14 @@ private void createCheckpoint(ManagedRocksDB rocksDB) throws RocksDBException { createCheckPoint(ACTIVE_DB_DIR_NAME, cpPath, rocksDB); final UUID snapshotId = UUID.randomUUID(); List colHandle = new ArrayList<>(); - colHandles.add(colHandle); - final DifferSnapshotInfo currentSnapshot = - new DifferSnapshotInfo(cpPath, snapshotId, snapshotGeneration, null, - ManagedRocksDB.openReadOnly(cpPath, getColumnFamilyDescriptors(), - colHandle)); - this.snapshots.add(currentSnapshot); + try (ManagedRocksDB rdb = ManagedRocksDB.openReadOnly(cpPath, getColumnFamilyDescriptors(), colHandle)) { + TreeMap> versionSstFilesMap = new TreeMap<>(); + versionSstFilesMap.put(0, rdb.getLiveMetadataForSSTFiles().values().stream().map(SstFileInfo::new) + 
.collect(Collectors.toList())); + final DifferSnapshotInfo currentSnapshot = new DifferSnapshotInfo((version) -> Paths.get(cpPath), + snapshotId, snapshotGeneration, versionSstFilesMap); + this.snapshots.add(currentSnapshot); + } long t2 = Time.monotonicNow(); LOG.trace("Current time: " + t2); @@ -1347,18 +1336,18 @@ private static CompactionLogEntry createCompactionEntry(long dbSequenceNumber, long compactionTime, List inputFiles, List outputFiles, - Map metadata) { + Map metadata) { return new CompactionLogEntry.Builder(dbSequenceNumber, compactionTime, toFileInfoList(inputFiles, metadata), toFileInfoList(outputFiles, metadata)).build(); } private static List toFileInfoList(List files, - Map metadata) { + Map metadata) { return files.stream() .map(fileName -> new CompactionFileInfo.Builder(fileName) - .setStartRange(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[0]).orElse(null)) - .setEndRange(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[1]).orElse(null)) - .setColumnFamily(Optional.ofNullable(metadata.get(fileName)).map(meta -> meta[2]).orElse(null)) + .setStartRange(Optional.ofNullable(metadata.get(fileName)).map(SstFileInfo::getStartKey).orElse(null)) + .setEndRange(Optional.ofNullable(metadata.get(fileName)).map(SstFileInfo::getEndKey).orElse(null)) + .setColumnFamily(Optional.ofNullable(metadata.get(fileName)).map(SstFileInfo::getColumnFamily).orElse(null)) .build()) .collect(Collectors.toList()); } @@ -1621,25 +1610,36 @@ public void testGetSSTDiffListWithoutDB2( // Snapshot is used for logging purpose and short-circuiting traversal. // Using gen 0 for this test. + List srcSnapshotSstFileInfoSet = srcSnapshotSstFiles.stream() + .map(fileName -> new SstFileInfo(fileName, "", "", "cf1")).collect(Collectors.toList()); + List destSnapshotSstFileInfoSet = destSnapshotSstFiles.stream() + .map(fileName -> new SstFileInfo(fileName, "", "", "cf1")).collect(Collectors.toList()); + TreeMap> srcSnapshotSstFileInfoMap = new TreeMap<>(); + srcSnapshotSstFileInfoMap.put(0, srcSnapshotSstFileInfoSet); + TreeMap> destSnapshotSstFileInfoMap = new TreeMap<>(); + destSnapshotSstFileInfoMap.put(0, destSnapshotSstFileInfoSet); + Path path1 = dbDir.toPath().resolve("path").resolve("to").resolve("dbcp1").toAbsolutePath(); + Path path2 = dbDir.toPath().resolve("path").resolve("to").resolve("dbcp2").toAbsolutePath(); DifferSnapshotInfo mockedSourceSnapshot = new DifferSnapshotInfo( - "/path/to/dbcp1", UUID.randomUUID(), 0L, columnFamilyPrefixInfo, null); + (version) -> path1, UUID.randomUUID(), 0L, srcSnapshotSstFileInfoMap); DifferSnapshotInfo mockedDestinationSnapshot = new DifferSnapshotInfo( - "/path/to/dbcp2", UUID.randomUUID(), 0L, columnFamilyPrefixInfo, null); - - Set actualSameSstFiles = new HashSet<>(); - Set actualDiffSstFiles = new HashSet<>(); - + (version) -> path2, UUID.randomUUID(), 0L, destSnapshotSstFileInfoMap); + + Map actualSameSstFiles = new HashMap<>(); + Map actualDiffSstFiles = new HashMap<>(); + DifferSnapshotVersion srcSnapshotVersion = new DifferSnapshotVersion(mockedSourceSnapshot, 0, + Collections.singleton("cf1")); + DifferSnapshotVersion destSnapshotVersion = new DifferSnapshotVersion(mockedDestinationSnapshot, 0, + Collections.singleton("cf1")); rocksDBCheckpointDiffer.internalGetSSTDiffList( - mockedSourceSnapshot, - mockedDestinationSnapshot, - srcSnapshotSstFiles, - destSnapshotSstFiles, + srcSnapshotVersion, + destSnapshotVersion, actualSameSstFiles, actualDiffSstFiles); // Check same and different SST files result - 
assertEquals(expectedSameSstFiles, actualSameSstFiles); - assertEquals(expectedDiffSstFiles, actualDiffSstFiles); + assertEquals(expectedSameSstFiles, actualSameSstFiles.keySet()); + assertEquals(expectedDiffSstFiles, actualDiffSstFiles.keySet()); } private static Stream shouldSkipNodeCases() { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java index a44baf1905f3..08ff90ab6cc8 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDiffUtils.java @@ -24,27 +24,21 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; -import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.IntStream; import java.util.stream.Stream; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.ozone.rocksdb.util.SstFileInfo; import org.assertj.core.util.Sets; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.Mockito; -import org.rocksdb.LiveFileMetaData; -import org.rocksdb.RocksDB; /** * Class to test RocksDiffUtils. 
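[Editor's sketch] The test changes above and below replace raw String[] metadata and mocked LiveFileMetaData with SstFileInfo objects keyed by file name, and filterRelevantSstFiles now works on that map directly. A minimal sketch of the column-family part of that filtering, assuming the SstFileInfo constructor and getters used in these tests (fileName, startKey, endKey, columnFamily); the class name SstFilterSketch is invented here, and the real RocksDiffUtils.filterRelevantSstFiles additionally prunes by key range against TablePrefixInfo:

import java.util.Map;
import java.util.Set;

import org.apache.ozone.rocksdb.util.SstFileInfo;

final class SstFilterSketch {

  // Keep files whose column family is untracked (null, as the tests model
  // "untracked" SSTs); drop tracked files whose column family is not in the
  // requested table set. Key-range pruning is intentionally omitted here.
  static void filterByColumnFamily(Map<String, SstFileInfo> files, Set<String> tablesToLookup) {
    files.values().removeIf(info ->
        info.getColumnFamily() != null && !tablesToLookup.contains(info.getColumnFamily()));
  }
}

This mirrors the expectation in the test above: the untracked SST file survives every lookup set, while the tracked files survive only when their column family is requested.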
@@ -80,9 +74,6 @@ public void testFilterFunction() { public static Stream values() { return Stream.of( - arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "b", "f"), - arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "e", "f"), - arguments("validColumnFamily", "invalidColumnFamily", "a", "d", "a", "f"), arguments("validColumnFamily", "validColumnFamily", "a", "d", "e", "g"), arguments("validColumnFamily", "validColumnFamily", "e", "g", "a", "d"), arguments("validColumnFamily", "validColumnFamily", "b", "b", "e", "g"), @@ -92,95 +83,40 @@ public static Stream values() { @ParameterizedTest @MethodSource("values") - public void testFilterRelevantSstFilesWithPreExistingCompactionInfo(String validSSTColumnFamilyName, - String invalidColumnFamilyName, - String validSSTFileStartRange, - String validSSTFileEndRange, - String invalidSSTFileStartRange, - String invalidSSTFileEndRange) { + public void testFilterRelevantSstFilesMap(String validSSTColumnFamilyName, String invalidColumnFamilyName, + String validSSTFileStartRange, String validSSTFileEndRange, String invalidSSTFileStartRange, + String invalidSSTFileEndRange) { String validSstFile = "filePath/validSSTFile.sst"; String invalidSstFile = "filePath/invalidSSTFile.sst"; String untrackedSstFile = "filePath/untrackedSSTFile.sst"; String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + validSSTFileStartRange.charAt(0)) / 2)); - Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); - Set inputSstFiles = new HashSet<>(); + Map sstFile = ImmutableMap.of( + validSstFile, new SstFileInfo(validSstFile, validSSTFileStartRange, validSSTFileEndRange, + validSSTColumnFamilyName), invalidSstFile, new SstFileInfo(invalidSstFile, invalidSSTFileStartRange, + invalidSSTFileEndRange, invalidColumnFamilyName), untrackedSstFile, + new SstFileInfo(untrackedSstFile, null, null, null)); + Map inputSstFiles = new HashMap<>(); List> tablesToLookupSet = Arrays.asList(ImmutableSet.of(validSSTColumnFamilyName), ImmutableSet.of(invalidColumnFamilyName), ImmutableSet.of(validSSTColumnFamilyName, invalidColumnFamilyName), Collections.emptySet()); for (Set tablesToLookup : tablesToLookupSet) { inputSstFiles.clear(); - inputSstFiles.addAll(sstFile); + inputSstFiles.putAll(sstFile); RocksDiffUtils.filterRelevantSstFiles(inputSstFiles, + tablesToLookup, new TablePrefixInfo( new HashMap() {{ put(invalidColumnFamilyName, getLexicographicallyHigherString(invalidSSTFileEndRange)); put(validSSTColumnFamilyName, expectedPrefix); - }}), ImmutableMap.of("validSSTFile", new CompactionNode(validSstFile, 0, validSSTFileStartRange, - validSSTFileEndRange, validSSTColumnFamilyName), "invalidSSTFile", - new CompactionNode(invalidSstFile, 0, invalidSSTFileStartRange, - invalidSSTFileEndRange, invalidColumnFamilyName)), tablesToLookup); + }})); if (tablesToLookup.contains(validSSTColumnFamilyName)) { - Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), inputSstFiles, + Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), inputSstFiles.keySet(), "Failed for " + tablesToLookup); } else { - Assertions.assertEquals(Sets.newTreeSet(untrackedSstFile), inputSstFiles, "Failed for " + tablesToLookup); - } - } - } - - private LiveFileMetaData getMockedLiveFileMetadata(String columnFamilyName, String startRange, - String endRange, - String name) { - LiveFileMetaData liveFileMetaData = Mockito.mock(LiveFileMetaData.class); - 
Mockito.when(liveFileMetaData.largestKey()).thenReturn(endRange.getBytes(StandardCharsets.UTF_8)); - Mockito.when(liveFileMetaData.columnFamilyName()).thenReturn(columnFamilyName.getBytes(StandardCharsets.UTF_8)); - Mockito.when(liveFileMetaData.smallestKey()).thenReturn(startRange.getBytes(StandardCharsets.UTF_8)); - Mockito.when(liveFileMetaData.fileName()).thenReturn("basePath/" + name + ".sst"); - return liveFileMetaData; - } - - @ParameterizedTest - @MethodSource("values") - public void testFilterRelevantSstFilesFromDB(String validSSTColumnFamilyName, - String invalidColumnFamilyName, - String validSSTFileStartRange, - String validSSTFileEndRange, - String invalidSSTFileStartRange, - String invalidSSTFileEndRange) { - for (int numberOfDBs = 1; numberOfDBs < 10; numberOfDBs++) { - String validSstFile = "filePath/validSSTFile.sst"; - String invalidSstFile = "filePath/invalidSSTFile.sst"; - String untrackedSstFile = "filePath/untrackedSSTFile.sst"; - int expectedDBKeyIndex = numberOfDBs / 2; - ManagedRocksDB[] rocksDBs = - IntStream.range(0, numberOfDBs).mapToObj(i -> Mockito.mock(ManagedRocksDB.class)) - .collect(Collectors.toList()).toArray(new ManagedRocksDB[numberOfDBs]); - for (int i = 0; i < numberOfDBs; i++) { - ManagedRocksDB managedRocksDB = rocksDBs[i]; - RocksDB mockedRocksDB = Mockito.mock(RocksDB.class); - Mockito.when(managedRocksDB.get()).thenReturn(mockedRocksDB); - if (i == expectedDBKeyIndex) { - LiveFileMetaData validLiveFileMetaData = getMockedLiveFileMetadata(validSSTColumnFamilyName, - validSSTFileStartRange, validSSTFileEndRange, "validSSTFile"); - LiveFileMetaData invalidLiveFileMetaData = getMockedLiveFileMetadata(invalidColumnFamilyName, - invalidSSTFileStartRange, invalidSSTFileEndRange, "invalidSSTFile"); - List liveFileMetaDatas = Arrays.asList(validLiveFileMetaData, invalidLiveFileMetaData); - Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDatas); - } else { - Mockito.when(mockedRocksDB.getLiveFilesMetaData()).thenReturn(Collections.emptyList()); - } - Mockito.when(managedRocksDB.getLiveMetadataForSSTFiles()) - .thenAnswer(invocation -> ManagedRocksDB.getLiveMetadataForSSTFiles(mockedRocksDB)); + Assertions.assertEquals(Sets.newTreeSet(untrackedSstFile), inputSstFiles.keySet(), + "Failed for " + tablesToLookup); } - - String expectedPrefix = String.valueOf((char)(((int)validSSTFileEndRange.charAt(0) + - validSSTFileStartRange.charAt(0)) / 2)); - Set sstFile = Sets.newTreeSet(validSstFile, invalidSstFile, untrackedSstFile); - RocksDiffUtils.filterRelevantSstFiles(sstFile, new TablePrefixInfo(ImmutableMap.of(validSSTColumnFamilyName, - expectedPrefix)), Collections.emptyMap(), - ImmutableSet.of(validSSTColumnFamilyName), rocksDBs); - Assertions.assertEquals(Sets.newTreeSet(validSstFile, untrackedSstFile), sstFile); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index 2cd38c0feafa..8928f504ce4a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -17,11 +17,13 @@ package org.apache.hadoop.ozone.freon; +import static java.util.stream.Collectors.toMap; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DB_COMPACTION_LOG_DIR; 
import static org.apache.hadoop.ozone.OzoneConsts.DB_COMPACTION_SST_BACKUP_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DIR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -34,14 +36,16 @@ import java.time.Duration; import java.util.Collections; import java.util.List; -import java.util.concurrent.TimeoutException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -56,8 +60,11 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.ozone.rocksdb.util.SstFileInfo; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DifferSnapshotVersion; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; @@ -103,6 +110,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setInt(OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL, -1); // Set DB CF write buffer to a much lower value so that flush and compaction // happens much more frequently without having to create a lot of keys. 
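[Editor's sketch] The hunk above wires OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL to a negative value in the test setup. A minimal sketch of that configuration, under the assumption (taken from this test's context, not stated by the patch) that a negative interval keeps the snapshot defrag service from ever being scheduled, so snapshot local data stays at version 0; the class name DefragDisabledConfSketch is invented:

import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

final class DefragDisabledConfSketch {
  static OzoneConfiguration newTestConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Assumption from the test above: a negative interval disables the
    // periodic snapshot defrag service for the duration of the test run.
    conf.setInt(OZONE_SNAPSHOT_DEFRAG_SERVICE_INTERVAL, -1);
    return conf;
  }
}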
@@ -141,9 +149,9 @@ private static String getSnapshotDBKey(String volumeName, String bucketName, return dbKeyPrefix + OM_KEY_PREFIX + snapshotName; } - private DifferSnapshotInfo getDifferSnapshotInfo( - OMMetadataManager omMetadataManager, String volumeName, String bucketName, - String snapshotName, ManagedRocksDB snapshotDB) throws IOException { + private DifferSnapshotVersion getDifferSnapshotInfo( + OMMetadataManager omMetadataManager, OmSnapshotLocalDataManager localDataManager, + String volumeName, String bucketName, String snapshotName) throws IOException { final String dbKey = getSnapshotDBKey(volumeName, bucketName, snapshotName); final SnapshotInfo snapshotInfo = @@ -152,16 +160,22 @@ private DifferSnapshotInfo getDifferSnapshotInfo( // Use RocksDB transaction sequence number in SnapshotInfo, which is // persisted at the time of snapshot creation, as the snapshot generation - return new DifferSnapshotInfo(checkpointPath, snapshotInfo.getSnapshotId(), - snapshotInfo.getDbTxSequenceNumber(), - omMetadataManager.getTableBucketPrefix(volumeName, bucketName), - snapshotDB); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider snapshotLocalData = + localDataManager.getOmSnapshotLocalData(snapshotInfo)) { + NavigableMap> versionSstFiles = snapshotLocalData.getSnapshotLocalData() + .getVersionSstFileInfos().entrySet().stream() + .collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getSstFiles(), + (u, v) -> { + throw new IllegalStateException(String.format("Duplicate key %s", u)); + }, TreeMap::new)); + DifferSnapshotInfo dsi = new DifferSnapshotInfo((version) -> Paths.get(checkpointPath), + snapshotInfo.getSnapshotId(), snapshotInfo.getDbTxSequenceNumber(), versionSstFiles); + return new DifferSnapshotVersion(dsi, 0, COLUMN_FAMILIES_TO_TRACK_IN_DAG); + } } @Test - public void testDAGReconstruction() - throws IOException, InterruptedException, TimeoutException { - + public void testDAGReconstruction() throws IOException { // Generate keys RandomKeyGenerator randomKeyGenerator = new RandomKeyGenerator(cluster.getConf()); @@ -208,28 +222,22 @@ public void testDAGReconstruction() // Get snapshot SST diff list OzoneManager ozoneManager = cluster.getOzoneManager(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + TablePrefixInfo bucketPrefix = omMetadataManager.getTableBucketPrefix(volumeName, bucketName); + OmSnapshotLocalDataManager localDataManager = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager(); RDBStore rdbStore = (RDBStore) omMetadataManager.getStore(); RocksDBCheckpointDiffer differ = rdbStore.getRocksDBCheckpointDiffer(); UncheckedAutoCloseableSupplier snapDB1 = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, "snap1"); UncheckedAutoCloseableSupplier snapDB2 = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, "snap2"); - DifferSnapshotInfo snap1 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap1", - ((RDBStore) snapDB1.get() - .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - DifferSnapshotInfo snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() - .getMetadataManager().getStore()).getDb().getManagedRocksDb()); + DifferSnapshotVersion snap1 = getDifferSnapshotInfo(omMetadataManager, localDataManager, + volumeName, bucketName, "snap1"); + DifferSnapshotVersion snap2 = getDifferSnapshotInfo(omMetadataManager, localDataManager, + volumeName, bucketName, "snap2"); 
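[Editor's sketch] getDifferSnapshotInfo above collects the per-version SST file lists into a NavigableMap using Collectors.toMap with a throwing merge function and a TreeMap supplier; the same collector shape reappears later in SnapshotDiffManager.getDSIFromSI. A self-contained, JDK-only sketch of that pattern (the class name VersionMapSketch and the sample data are invented for illustration):

import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.stream.Collectors;

final class VersionMapSketch {
  public static void main(String[] args) {
    Map<Integer, List<String>> perVersion = Map.of(
        0, List.of("000010.sst", "000011.sst"),
        1, List.of("000020.sst"));
    // TreeMap keeps versions in ascending order; the merge function fires only
    // on duplicate keys, which would indicate corrupt local data upstream.
    NavigableMap<Integer, List<String>> sorted = perVersion.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
            (u, v) -> {
              throw new IllegalStateException(String.format("Duplicate key %s", u));
            },
            TreeMap::new));
    System.out.println(sorted.firstKey()); // prints 0
  }
}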
// RocksDB does checkpointing in a separate thread, wait for it - final File checkpointSnap1 = new File(snap1.getDbPath()); - GenericTestUtils.waitFor(checkpointSnap1::exists, 2000, 20000); - final File checkpointSnap2 = new File(snap2.getDbPath()); - GenericTestUtils.waitFor(checkpointSnap2::exists, 2000, 20000); - - List sstDiffList21 = differ.getSSTDiffList(snap2, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + List sstDiffList21 = differ.getSSTDiffList(snap2, snap1, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); LOG.debug("Got diff list: {}", sstDiffList21); // Delete 1000 keys, take a 3rd snapshot, and do another diff @@ -241,23 +249,19 @@ public void testDAGReconstruction() LOG.debug("Snapshot created: {}", resp); UncheckedAutoCloseableSupplier snapDB3 = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, "snap3"); - DifferSnapshotInfo snap3 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap3", - ((RDBStore) snapDB3.get() - .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - final File checkpointSnap3 = new File(snap3.getDbPath()); - GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); + DifferSnapshotVersion snap3 = getDifferSnapshotInfo(omMetadataManager, localDataManager, volumeName, bucketName, + "snap3"); - List sstDiffList32 = differ.getSSTDiffList(snap3, snap2, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + List sstDiffList32 = differ.getSSTDiffList(snap3, snap2, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); // snap3-snap1 diff result is a combination of snap3-snap2 and snap2-snap1 - List sstDiffList31 = differ.getSSTDiffList(snap3, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + List sstDiffList31 = differ.getSSTDiffList(snap3, snap1, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); // Same snapshot. 
Result should be empty list - List sstDiffList22 = differ.getSSTDiffList(snap2, snap2, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + List sstDiffList22 = differ.getSSTDiffList(snap2, snap2, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); assertThat(sstDiffList22).isEmpty(); snapDB1.close(); snapDB2.close(); @@ -266,33 +270,29 @@ public void testDAGReconstruction() cluster.restartOzoneManager(); ozoneManager = cluster.getOzoneManager(); omMetadataManager = ozoneManager.getMetadataManager(); + localDataManager = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager(); snapDB1 = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, "snap1"); snapDB2 = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, "snap2"); - snap1 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap1", - ((RDBStore) snapDB1.get() - .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() - .getMetadataManager().getStore()).getDb().getManagedRocksDb()); + snap1 = getDifferSnapshotInfo(omMetadataManager, localDataManager, + volumeName, bucketName, "snap1"); + snap2 = getDifferSnapshotInfo(omMetadataManager, localDataManager, + volumeName, bucketName, "snap2"); snapDB3 = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, "snap3"); - snap3 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap3", - ((RDBStore) snapDB3.get() - .getMetadataManager().getStore()).getDb().getManagedRocksDb()); - List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + snap3 = getDifferSnapshotInfo(omMetadataManager, localDataManager, + volumeName, bucketName, "snap3"); + List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); assertEquals(sstDiffList21, sstDiffList21Run2); - List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + List sstDiffList32Run2 = differ.getSSTDiffList(snap3, snap2, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); assertEquals(sstDiffList32, sstDiffList32Run2); - List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1, COLUMN_FAMILIES_TO_TRACK_IN_DAG) - .orElse(Collections.emptyList()); + List sstDiffList31Run2 = differ.getSSTDiffList(snap3, snap1, bucketPrefix, + COLUMN_FAMILIES_TO_TRACK_IN_DAG, true).orElse(Collections.emptyList()); assertEquals(sstDiffList31, sstDiffList31Run2); snapDB1.close(); snapDB2.close(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 453eb3b3b075..5d715cdc0d8d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -316,7 +316,7 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { cacheCleanupServiceInterval, compactNonSnapshotDiffTables, ozoneManager.getMetadataManager().getLock()); this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ, - ozoneManager, 
snapDiffJobCf, snapDiffReportCf, + ozoneManager, snapshotLocalDataManager, snapDiffJobCf, snapDiffReportCf, columnFamilyOptions, codecRegistry); diffCleanupServiceInterval = ozoneManager.getConfiguration() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index e5bc8dcfa91a..78af0d9dd9d2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import static java.util.stream.Collectors.toMap; import static org.apache.commons.lang3.StringUtils.leftPad; import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.CREATE; @@ -60,6 +61,7 @@ import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.OBJECT_ID_MAP_GEN_OBS; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_DAG_WALK; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_FULL_DIFF; +import static org.apache.ozone.rocksdiff.RocksDiffUtils.filterRelevantSstFiles; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; @@ -78,9 +80,11 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.NavigableMap; import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.Callable; @@ -90,7 +94,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; -import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.commons.io.file.PathUtils; import org.apache.commons.lang3.tuple.Pair; @@ -109,6 +112,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -117,6 +122,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.ListSnapshotDiffJobResponse; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; @@ -125,10 +131,10 @@ import org.apache.hadoop.ozone.util.ClosableIterator; import org.apache.logging.log4j.util.Strings; import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdb.util.SstFileInfo; import org.apache.ozone.rocksdb.util.SstFileSetReader; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; -import org.apache.ozone.rocksdiff.RocksDiffUtils; import 
org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -153,6 +159,7 @@ public class SnapshotDiffManager implements AutoCloseable { private final ManagedRocksDB db; private final RocksDBCheckpointDiffer differ; private final OzoneManager ozoneManager; + private final OMMetadataManager activeOmMetadataManager; private final CodecRegistry codecRegistry; private final ManagedColumnFamilyOptions familyOptions; // TODO: [SNAPSHOT] Use different wait time based of job status. @@ -194,11 +201,13 @@ public class SnapshotDiffManager implements AutoCloseable { (SnapshotInfo fromSnapshotInfo, SnapshotInfo toSnapshotInfo) -> fromSnapshotInfo.getSnapshotId() + DELIMITER + toSnapshotInfo.getSnapshotId(); + private final OmSnapshotLocalDataManager snapshotLocalDataManager; @SuppressWarnings("parameternumber") public SnapshotDiffManager(ManagedRocksDB db, RocksDBCheckpointDiffer differ, OzoneManager ozoneManager, + OmSnapshotLocalDataManager snapshotLocalDataManager, ColumnFamilyHandle snapDiffJobCfh, ColumnFamilyHandle snapDiffReportCfh, ManagedColumnFamilyOptions familyOptions, @@ -206,6 +215,8 @@ public SnapshotDiffManager(ManagedRocksDB db, this.db = db; this.differ = differ; this.ozoneManager = ozoneManager; + this.activeOmMetadataManager = ozoneManager.getMetadataManager(); + this.snapshotLocalDataManager = snapshotLocalDataManager; this.familyOptions = familyOptions; this.codecRegistry = codecRegistry; this.defaultWaitTime = ozoneManager.getConfiguration().getTimeDuration( @@ -350,37 +361,34 @@ private void deleteDir(Path path) { /** * Convert from SnapshotInfo to DifferSnapshotInfo. */ - private DifferSnapshotInfo getDSIFromSI(SnapshotInfo snapshotInfo, - OmSnapshot omSnapshot, final String volumeName, final String bucketName) - throws IOException { - - final OMMetadataManager snapshotOMMM = omSnapshot.getMetadataManager(); - final String checkpointPath = - snapshotOMMM.getStore().getDbLocation().getPath(); + private static DifferSnapshotInfo getDSIFromSI(OMMetadataManager activeOmMetadataManager, + SnapshotInfo snapshotInfo, OmSnapshotLocalData snapshotLocalData) throws IOException { final UUID snapshotId = snapshotInfo.getSnapshotId(); final long dbTxSequenceNumber = snapshotInfo.getDbTxSequenceNumber(); + NavigableMap> versionSstFiles = snapshotLocalData.getVersionSstFileInfos() + .entrySet().stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getSstFiles(), + (u, v) -> { + throw new IllegalStateException(String.format("Duplicate key %s", u)); + }, TreeMap::new)); + if (versionSstFiles.isEmpty()) { + throw new IOException(String.format("No versions found corresponding to %s", snapshotId)); + } return new DifferSnapshotInfo( - checkpointPath, - snapshotId, - dbTxSequenceNumber, - snapshotOMMM.getTableBucketPrefix(volumeName, bucketName), - ((RDBStore)snapshotOMMM.getStore()).getDb().getManagedRocksDb()); + version -> OmSnapshotManager.getSnapshotPath(activeOmMetadataManager, snapshotId, version), + snapshotId, dbTxSequenceNumber, versionSstFiles); } @VisibleForTesting - protected Set getSSTFileListForSnapshot(OmSnapshot snapshot, - Set tablesToLookUp) { - return RdbUtil.getSSTFilesForComparison(((RDBStore)snapshot - .getMetadataManager().getStore()).getDb().getManagedRocksDb(), - tablesToLookUp); + protected Set getSSTFileListForSnapshot(OmSnapshot snapshot, Set tablesToLookUp) { + return RdbUtil.getSSTFilesForComparison( + 
((RDBStore)snapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); } @VisibleForTesting - protected Map getSSTFileMapForSnapshot(OmSnapshot snapshot, + protected Map getSSTFileMapForSnapshot(OmSnapshot snapshot, Set tablesToLookUp) throws IOException { return RdbUtil.getSSTFilesWithInodesForComparison(((RDBStore)snapshot - .getMetadataManager().getStore()).getDb().getManagedRocksDb(), - tablesToLookUp); + .getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); } /** @@ -1061,10 +1069,12 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // tombstone is not loaded. // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone if (skipNativeDiff || !isNativeLibsLoaded) { - Set inputFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp); - ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(); - RocksDiffUtils.filterRelevantSstFiles(inputFiles, tablePrefixes, tablesToLookUp, fromDB); - deltaFiles.addAll(inputFiles); + Set inputFiles = filterRelevantSstFiles(getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp), + tablesToLookUp, tablePrefixes); + Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); + for (SstFileInfo sstFileInfo : inputFiles) { + deltaFiles.add(sstFileInfo.getFilePath(fromSnapshotPath).toAbsolutePath().toString()); + } } if (LOG.isDebugEnabled()) { LOG.debug("Computed Delta SST File Set, Total count = {} ", deltaFiles.size()); @@ -1171,25 +1181,28 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, throws IOException { // TODO: [SNAPSHOT] Refactor the parameter list Optional> deltaFiles = Optional.empty(); - // Check if compaction DAG is available, use that if so if (differ != null && fsInfo != null && tsInfo != null && !useFullDiff) { - String volume = fsInfo.getVolumeName(); - String bucket = fsInfo.getBucketName(); - // Construct DifferSnapshotInfo - final DifferSnapshotInfo fromDSI = - getDSIFromSI(fsInfo, fromSnapshot, volume, bucket); - final DifferSnapshotInfo toDSI = - getDSIFromSI(tsInfo, toSnapshot, volume, bucket); - - recordActivity(jobKey, SST_FILE_DELTA_DAG_WALK); - LOG.debug("Calling RocksDBCheckpointDiffer"); - try { - deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, tablesToLookUp, diffDir).map(HashSet::new); - } catch (Exception exception) { - recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF); - LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. 
" + - "It will fallback to full diff now.", exception); + try (ReadableOmSnapshotLocalDataProvider snapLocalDataProvider = snapshotLocalDataManager.getOmSnapshotLocalData( + toSnapshot.getSnapshotID(), fromSnapshot.getSnapshotID())) { + OmSnapshotLocalData toSnapshotLocalData = snapLocalDataProvider.getSnapshotLocalData(); + OmSnapshotLocalData fromSnapshotLocalData = snapLocalDataProvider.getPreviousSnapshotLocalData(); + // Construct DifferSnapshotInfo + final DifferSnapshotInfo fromDSI = getDSIFromSI(activeOmMetadataManager, fsInfo, fromSnapshotLocalData); + final DifferSnapshotInfo toDSI = getDSIFromSI(activeOmMetadataManager, tsInfo, toSnapshotLocalData); + + recordActivity(jobKey, SST_FILE_DELTA_DAG_WALK); + LOG.debug("Calling RocksDBCheckpointDiffer"); + try { + final Map versionMap = toSnapshotLocalData.getVersionSstFileInfos().entrySet() + .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getPreviousSnapshotVersion())); + deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, versionMap, tablePrefixInfo, tablesToLookUp, + diffDir).map(HashSet::new); + } catch (Exception exception) { + recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF); + LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. " + + "It will fallback to full diff now.", exception); + } } } @@ -1198,15 +1211,10 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, // the slower approach. if (!useFullDiff) { LOG.warn("RocksDBCheckpointDiffer is not available, falling back to" + - " slow path"); + " slow path"); } recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF); - ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore()) - .getDb().getManagedRocksDb(); - ManagedRocksDB toDB = ((RDBStore)toSnapshot.getMetadataManager().getStore()) - .getDb().getManagedRocksDb(); - Set diffFiles = getDiffFiles(fromSnapshot, toSnapshot, tablesToLookUp); - RocksDiffUtils.filterRelevantSstFiles(diffFiles, tablePrefixInfo, tablesToLookUp, fromDB, toDB); + Set diffFiles = getDiffFiles(fromSnapshot, toSnapshot, tablesToLookUp, tablePrefixInfo); deltaFiles = Optional.of(diffFiles); } @@ -1215,25 +1223,42 @@ Set getDeltaFiles(OmSnapshot fromSnapshot, toSnapshot.getSnapshotTableKey())); } - private Set getDiffFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, Set tablesToLookUp) { + private Set getDiffFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, Set tablesToLookUp, + TablePrefixInfo tablePrefixInfo) { Set diffFiles; + Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); + Path toSnapshotPath = toSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); try { - Map fromSnapshotFiles = getSSTFileMapForSnapshot(fromSnapshot, tablesToLookUp); - Map toSnapshotFiles = getSSTFileMapForSnapshot(toSnapshot, tablesToLookUp); - diffFiles = Stream.concat( - fromSnapshotFiles.entrySet().stream() - .filter(e -> !toSnapshotFiles.containsKey(e.getKey())), - toSnapshotFiles.entrySet().stream() - .filter(e -> !fromSnapshotFiles.containsKey(e.getKey()))) - .map(Map.Entry::getValue) - .collect(Collectors.toSet()); + diffFiles = new HashSet<>(); + Map fromSnapshotFiles = filterRelevantSstFiles(getSSTFileMapForSnapshot(fromSnapshot, + tablesToLookUp), tablesToLookUp, tablePrefixInfo); + Map toSnapshotFiles = filterRelevantSstFiles(getSSTFileMapForSnapshot(toSnapshot, + tablesToLookUp), tablesToLookUp, tablePrefixInfo); + for (Map.Entry entry : fromSnapshotFiles.entrySet()) { + if 
(!toSnapshotFiles.containsKey(entry.getKey())) { + diffFiles.add(entry.getValue().getFilePath(fromSnapshotPath).toAbsolutePath().toString()); + } + } + for (Map.Entry entry : toSnapshotFiles.entrySet()) { + if (!fromSnapshotFiles.containsKey(entry.getKey())) { + diffFiles.add(entry.getValue().getFilePath(toSnapshotPath).toAbsolutePath().toString()); + } + } } catch (IOException e) { // In case of exception during inode read use all files LOG.error("Exception occurred while populating delta files for snapDiff", e); LOG.warn("Falling back to full file list comparison, inode-based optimization skipped."); + Set fromSnapshotFiles = filterRelevantSstFiles(getSSTFileListForSnapshot(fromSnapshot, + tablesToLookUp), tablesToLookUp, tablePrefixInfo); + Set toSnapshotFiles = filterRelevantSstFiles(getSSTFileListForSnapshot(toSnapshot, + tablesToLookUp), tablesToLookUp, tablePrefixInfo); diffFiles = new HashSet<>(); - diffFiles.addAll(getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp)); - diffFiles.addAll(getSSTFileListForSnapshot(toSnapshot, tablesToLookUp)); + for (SstFileInfo sstFileInfo : fromSnapshotFiles) { + diffFiles.add(sstFileInfo.getFilePath(fromSnapshotPath).toAbsolutePath().toString()); + } + for (SstFileInfo sstFileInfo : toSnapshotFiles) { + diffFiles.add(sstFileInfo.getFilePath(toSnapshotPath).toAbsolutePath().toString()); + } } return diffFiles; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 840ef6eaeb8f..3976fec41871 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -49,6 +49,7 @@ import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.QUEUED; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.REJECTED; +import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION; import static org.apache.ratis.util.JavaUtils.attempt; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -84,6 +85,7 @@ import jakarta.annotation.Nonnull; import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -103,6 +105,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.LongStream; @@ -126,6 +129,7 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -136,6 +140,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock; +import 
org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.om.snapshot.SnapshotTestUtils.StubbedPersistentMap; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage; @@ -147,6 +152,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ExitUtil; import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdb.util.SstFileInfo; import org.apache.ozone.rocksdb.util.SstFileSetReader; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; @@ -228,6 +234,9 @@ public class TestSnapshotDiffManager { @Mock private RocksIterator jobTableIterator; + @Mock + private OmSnapshotLocalDataManager localDataManager; + @Mock private OmSnapshotManager omSnapshotManager; @@ -378,7 +387,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { return snapshotCache.get(snapInfo.getSnapshotId()); }); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, + snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, localDataManager, snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); when(omSnapshotManager.getDiffCleanupServiceInterval()).thenReturn(0L); } @@ -434,10 +443,12 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { when(differ.getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + anyMap(), + any(TablePrefixInfo.class), anySet(), eq(diffDir)) ).thenReturn(Optional.of(Lists.newArrayList(randomStrings))); - + mockSnapshotLocalData(); UncheckedAutoCloseableSupplier rcFromSnapshot = omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); UncheckedAutoCloseableSupplier rcToSnapshot = @@ -448,28 +459,33 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); when(jobTableIterator.isValid()).thenReturn(false); - try (MockedStatic mockedRdbUtil = Mockito.mockStatic(RdbUtil.class, Mockito.CALLS_REAL_METHODS); - MockedStatic mockedRocksDiffUtils = Mockito.mockStatic(RocksDiffUtils.class, - Mockito.CALLS_REAL_METHODS)) { - mockedRdbUtil.when(() -> RdbUtil.getSSTFilesForComparison(any(), any())) - .thenReturn(Collections.singleton(RandomStringUtils.secure().nextAlphabetic(10))); - mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles(any(), any(), anySet())) - .thenAnswer(i -> null); - SnapshotDiffManager spy = spy(snapshotDiffManager); - doNothing().when(spy).recordActivity(any(), any()); - doNothing().when(spy).updateProgress(anyString(), anyDouble()); - Set deltaFiles = spy.getDeltaFiles( - fromSnapshot, - toSnapshot, - Sets.newHashSet("cf1", "cf2"), fromSnapshotInfo, - toSnapshotInfo, false, - new TablePrefixInfo(Collections.emptyMap()), diffDir, diffJobKey); - assertEquals(randomStrings, deltaFiles); - } + + SnapshotDiffManager spy = spy(snapshotDiffManager); + doNothing().when(spy).recordActivity(any(), any()); + doNothing().when(spy).updateProgress(anyString(), anyDouble()); + Set deltaFiles = spy.getDeltaFiles( + fromSnapshot, + toSnapshot, + Sets.newHashSet("cf1", "cf2"), fromSnapshotInfo, + toSnapshotInfo, false, + new 
TablePrefixInfo(Collections.emptyMap()), diffDir, diffJobKey); + assertEquals(randomStrings, deltaFiles); + rcFromSnapshot.close(); rcToSnapshot.close(); } + private void mockSnapshotLocalData() throws IOException { + OmSnapshotLocalData localData = mock(OmSnapshotLocalData.class); + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(localData); + when(snapProvider.getSnapshotLocalData()).thenReturn(localData); + OmSnapshotLocalData.VersionMeta versionMeta = mock(OmSnapshotLocalData.VersionMeta.class); + when(versionMeta.getSstFiles()).thenReturn(Collections.emptyList()); + when(localData.getVersionSstFileInfos()).thenReturn(ImmutableMap.of(0, versionMeta)); + when(localDataManager.getOmSnapshotLocalData(any(UUID.class), any(UUID.class))).thenReturn(snapProvider); + } + @ParameterizedTest @CsvSource({"0,true", "1,true", "2,true", "5,true", "10,true", "100,true", "1000,true", "10000,true", "0,false", "1,false", "2,false", "5,false", @@ -483,26 +499,27 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, Set deltaStrings = new HashSet<>(); mockedRdbUtil.when( - () -> RdbUtil.getSSTFilesForComparison(any(), anySet())) - .thenAnswer((Answer>) invocation -> { - Set retVal = IntStream.range(0, numberOfFiles) + () -> RdbUtil.getSSTFilesWithInodesForComparison(any(), anySet())) + .thenAnswer(invocation -> { + Map retVal = IntStream.range(0, numberOfFiles) .mapToObj(i -> RandomStringUtils.secure().nextAlphabetic(10)) - .collect(Collectors.toSet()); - deltaStrings.addAll(retVal); + .collect(Collectors.toMap(Function.identity(), + i -> new SstFileInfo(i, null, null, null))); + deltaStrings.addAll(retVal.keySet().stream().map(Object::toString).collect(Collectors.toSet())); return retVal; }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), any(), anyMap(), anySet(), any(ManagedRocksDB.class), - any(ManagedRocksDB.class))) - .thenAnswer((Answer) invocationOnMock -> { - invocationOnMock.getArgument(0, Set.class).stream() + RocksDiffUtils.filterRelevantSstFiles(anyMap(), anySet(), any())) + .thenAnswer(invocationOnMock -> { + invocationOnMock.getArgument(0, Map.class).entrySet().stream() .findAny().ifPresent(val -> { - assertTrue(deltaStrings.contains(val)); - invocationOnMock.getArgument(0, Set.class).remove(val); - deltaStrings.remove(val); + Map.Entry entry = (Map.Entry) val; + assertTrue(deltaStrings.contains(entry.getKey())); + invocationOnMock.getArgument(0, Map.class).remove(entry.getKey()); + deltaStrings.remove(entry.getKey()); }); - return null; + return invocationOnMock.getArgument(0, Map.class); }); UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); @@ -515,11 +532,13 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, when(differ.getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + anyMap(), + any(TablePrefixInfo.class), anySet(), anyString())) - .thenReturn(Optional.ofNullable(Collections.emptyList())); + .thenReturn(Optional.empty()); } - + mockSnapshotLocalData(); UncheckedAutoCloseableSupplier rcFromSnapshot = omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); UncheckedAutoCloseableSupplier rcToSnapshot = @@ -542,7 +561,12 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, false, new TablePrefixInfo(Collections.emptyMap()), snapDiffDir.getAbsolutePath(), diffJobKey); - assertEquals(deltaStrings, deltaFiles); + 
assertEquals(deltaStrings.stream() + .map(i -> dbStore.getDbLocation().toPath().resolve(i + SST_FILE_EXTENSION).toAbsolutePath().toString()) + .collect(Collectors.toSet()), deltaFiles); + if (useFullDiff && numberOfFiles > 1) { + assertThat(deltaFiles).isNotEmpty(); + } } } @@ -566,8 +590,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), any(), anyMap(), anySet(), any(ManagedRocksDB.class), - any(ManagedRocksDB.class))) + RocksDiffUtils.filterRelevantSstFiles(anySet(), anySet(), any())) .thenAnswer((Answer) invocationOnMock -> { invocationOnMock.getArgument(0, Set.class).stream() .findAny().ifPresent(val -> { @@ -589,6 +612,8 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) .getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + anyMap(), + any(TablePrefixInfo.class), anySet(), anyString()); @@ -606,6 +631,7 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) SnapshotDiffManager spy = spy(snapshotDiffManager); doNothing().when(spy).recordActivity(any(), any()); doNothing().when(spy).updateProgress(anyString(), anyDouble()); + mockSnapshotLocalData(); Set deltaFiles = spy.getDeltaFiles( fromSnapshot, toSnapshot, @@ -1541,33 +1567,36 @@ public void testGetDeltaFilesWithFullDiff() throws IOException { SnapshotDiffManager spy = spy(snapshotDiffManager); UUID snap1 = UUID.randomUUID(); OmSnapshot fromSnapshot = getMockedOmSnapshot(snap1); + Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().toPath(); UUID snap2 = UUID.randomUUID(); OmSnapshot toSnapshot = getMockedOmSnapshot(snap2); + Path toSnapshotPath = toSnapshot.getMetadataManager().getStore().getDbLocation().toPath(); Mockito.doAnswer(invocation -> { OmSnapshot snapshot = invocation.getArgument(0); if (snapshot == fromSnapshot) { - Map inodeToFileMap = new HashMap<>(); - inodeToFileMap.put(1, "1.sst"); - inodeToFileMap.put(2, "2.sst"); - inodeToFileMap.put(3, "3.sst"); + Map inodeToFileMap = new HashMap<>(); + inodeToFileMap.put(1, new SstFileInfo("1", null, null, null)); + inodeToFileMap.put(2, new SstFileInfo("2", null, null, null)); + inodeToFileMap.put(3, new SstFileInfo("3", null, null, null)); return inodeToFileMap; } if (snapshot == toSnapshot) { - Map inodeToFileMap = new HashMap<>(); - inodeToFileMap.put(1, "10.sst"); - inodeToFileMap.put(2, "20.sst"); - inodeToFileMap.put(4, "4.sst"); + Map inodeToFileMap = new HashMap<>(); + inodeToFileMap.put(1, new SstFileInfo("10", null, null, null)); + inodeToFileMap.put(2, new SstFileInfo("20", null, null, null)); + inodeToFileMap.put(4, new SstFileInfo("4", null, null, null)); return inodeToFileMap; } return null; - }).when(spy).getSSTFileMapForSnapshot(Mockito.any(OmSnapshot.class), - Mockito.anySet()); + }).when(spy).getSSTFileMapForSnapshot(Mockito.any(OmSnapshot.class), Mockito.anySet()); doNothing().when(spy).recordActivity(any(), any()); doNothing().when(spy).updateProgress(anyString(), anyDouble()); String diffJobKey = snap1 + DELIMITER + snap2; + Set deltaFiles = spy.getDeltaFiles(fromSnapshot, toSnapshot, Collections.emptySet(), snapshotInfo, snapshotInfo, true, new TablePrefixInfo(Collections.emptyMap()), null, diffJobKey); - Assertions.assertEquals(Sets.newHashSet("3.sst", "4.sst"), deltaFiles); + Assertions.assertEquals(Sets.newHashSet(fromSnapshotPath.resolve("3.sst").toAbsolutePath().toString(), + 
toSnapshotPath.resolve("4.sst").toAbsolutePath().toString()), deltaFiles);
   }
 
   @Test

From 3771ff4ebbecfea068442702c9ba0d4f7887ff01 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Sun, 9 Nov 2025 08:47:56 -0500
Subject: [PATCH 102/126] HDDS-13901. OmSnapshotLocalDataManager should throw IOException if unable to resolve to a previous snapshot id

Change-Id: I9382a816415880596534060a841b2840fc5c2b1f
---
 .../snapshot/OmSnapshotLocalDataManager.java | 13 +++++++
 .../TestOmSnapshotLocalDataManager.java      | 38 ++++++++++++-------
 2 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
index 70955fa05783..0e665167283d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java
@@ -657,6 +657,13 @@ private LockDataProviderInitResult initialize(
           currentIteratedSnapshotId, snapId, toResolveSnapshotId));
     }
     UUID previousId = previousIds.iterator().next();
+    // If previousId is null while toResolveSnapshotId is non-null, fail fast:
+    // the snapshot can never be resolved against toResolveSnapshotId.
+    if (previousId == null) {
+      throw new IOException(String.format(
+          "Previous id of snapshot %s is null, thus %s cannot be resolved against id %s",
+          currentIteratedSnapshotId, snapId, toResolveSnapshotId));
+    }
     HierarchicalResourceLock previousToPreviousReadLockAcquired = acquireLock(previousId, true);
     try {
       // Get the version node for the snapshot and update the version node to the successor to point to the
@@ -705,6 +712,12 @@ private LockDataProviderInitResult initialize(
         // Set the previous snapshot version to the relativePreviousVersionNode which was captured.
         versionMeta.setPreviousSnapshotVersion(relativePreviousVersionNode.getVersion());
       }
+    } else if (toResolveSnapshotId != null) {
+      // The current snapshot's previousSnapshotId is null while toResolveSnapshotId
+      // is non-null, so the snapshot can never be resolved against toResolveSnapshotId.
+ throw new IOException(String.format("Unable to resolve previous snapshot id for snapshot: %s against " + + "previous snapshotId : %s since current snapshot's previousSnapshotId is null", + snapId, toResolveSnapshotId)); } else { toResolveSnapshotId = null; ssLocalData.setPreviousSnapshotId(null); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 9aa56d2dd027..e3b5d343b368 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -685,7 +685,7 @@ public void testVersionResolution(boolean read) throws IOException { addVersionsToLocalData(localDataManager, snapshotIds.get(i), versionMaps.get(i)); } for (int start = 0; start < snapshotIds.size(); start++) { - for (int end = start + 1; end < snapshotIds.size(); end++) { + for (int end = 0; end < snapshotIds.size(); end++) { UUID prevSnapId = snapshotIds.get(start); UUID snapId = snapshotIds.get(end); Map versionMap = new HashMap<>(versionMaps.get(end)); @@ -695,19 +695,29 @@ public void testVersionResolution(boolean read) throws IOException { version.setValue(versionMaps.get(idx).getOrDefault(version.getValue(), 0)); } } - try (ReadableOmSnapshotLocalDataProvider snap = read ? - localDataManager.getOmSnapshotLocalData(snapId, prevSnapId) : - localDataManager.getWritableOmSnapshotLocalData(snapId, prevSnapId)) { - OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); - OmSnapshotLocalData prevSnapshotLocalData = snap.getPreviousSnapshotLocalData(); - assertEquals(prevSnapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId()); - assertEquals(prevSnapId, snapshotLocalData.getPreviousSnapshotId()); - assertEquals(snapId, snapshotLocalData.getSnapshotId()); - assertTrue(snapshotLocalData.getVersionSstFileInfos().size() > 1); - snapshotLocalData.getVersionSstFileInfos() - .forEach((version, versionMeta) -> { - assertEquals(versionMap.get(version), versionMeta.getPreviousSnapshotVersion()); - }); + if (start >= end) { + assertThrows(IOException.class, () -> { + if (read) { + localDataManager.getOmSnapshotLocalData(snapId, prevSnapId); + } else { + localDataManager.getWritableOmSnapshotLocalData(snapId, prevSnapId); + } + }); + } else { + try (ReadableOmSnapshotLocalDataProvider snap = read ? + localDataManager.getOmSnapshotLocalData(snapId, prevSnapId) : + localDataManager.getWritableOmSnapshotLocalData(snapId, prevSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + OmSnapshotLocalData prevSnapshotLocalData = snap.getPreviousSnapshotLocalData(); + assertEquals(prevSnapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId()); + assertEquals(prevSnapId, snapshotLocalData.getPreviousSnapshotId()); + assertEquals(snapId, snapshotLocalData.getSnapshotId()); + assertTrue(snapshotLocalData.getVersionSstFileInfos().size() > 1); + snapshotLocalData.getVersionSstFileInfos() + .forEach((version, versionMeta) -> { + assertEquals(versionMap.get(version), versionMeta.getPreviousSnapshotVersion()); + }); + } } } } From 6fbf8c7924e760cdf1e104927e1acbceee32b4ed Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 10 Nov 2025 17:35:24 -0500 Subject: [PATCH 103/126] HDDS-13849. 
Address review comments

Change-Id: I06301f780ff7fff2ab652722872b1707501d61ec
---
 .../org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java | 4 ++++
 .../hadoop/ozone/om/service/TestKeyDeletingService.java     | 1 -
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 65db6e9b69f4..44cbc45ad6b5 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -621,6 +621,7 @@ private String trimSSTFilename(String filename) {
    * Read the current Live manifest for a given RocksDB instance (Active or
    * Checkpoint).
    * @param rocksDB open rocksDB instance.
+   * @param tableFilter set of column-family/table names to include when collecting live SSTs.
    * @return a list of SST files (without extension) in the DB.
    */
   public Set readRocksDBLiveFiles(ManagedRocksDB rocksDB, Set tableFilter) {
@@ -822,6 +823,7 @@ private String getSSTFullPath(String sstFilenameWithoutExtension,
    *
    * @param src source snapshot
    * @param dest destination snapshot
+   * @param tablesToLookup set of table (column family) names used to restrict which SST files to return.
    * @param sstFilesDirForSnapDiffJob dir to create hardlinks for SST files
    *                                  for snapDiff job.
    * @return A list of SST files without extension.
@@ -856,6 +858,8 @@ public synchronized Optional> getSSTDiffListWithFullPath(DifferSnap
    *
    * @param src source snapshot
    * @param dest destination snapshot
+   * @param tablesToLookup set of column-family (table) names to include when reading SST files;
+   *                       must be non-null.
    * @return A list of SST files without extension. e.g. ["000050", "000060"]
    */
   public synchronized Optional> getSSTDiffList(DifferSnapshotInfo src,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
index 15d29d14a7d2..3dcddaeeafa9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java
@@ -1260,7 +1260,6 @@ private static boolean assertTableRowCount(long expectedCount,
       LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
           count.get(), expectedCount);
     });
-    System.out.println("Swaminathan \t" + count.get() + "\t" + expectedCount);
     return count.get() == expectedCount;
   }

From ea64602938b0062d44b553d30e53ac052442b36c Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Mon, 10 Nov 2025 17:41:29 -0500
Subject: [PATCH 104/126] HDDS-13849.
Address review comments

Change-Id: Ic5ea1918e91b86a0b2c410bfc1b4c4206858a2f1
---
 .../org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 9721d6bc44c7..e7826708b895 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -1861,7 +1861,7 @@ public boolean containsIncompleteMPUs(String volume, String bucket)
     return false;
   }
 
-  // NOTE: Update both getTableBucketPrefixInfo(volume, bucket) & getTableBucketPrefix(tableName, volume, bucket)
+  // NOTE: Update both getTableBucketPrefix(volume, bucket) & getTableBucketPrefix(tableName, volume, bucket)
   // simultaneously. Implemented duplicate functions to avoid computing bucketKeyPrefix redundantly for each and
   // every table over and over again.
   @Override
@@ -1909,6 +1909,8 @@ public String getTableBucketPrefix(String tableName, String volume, String bucket)
     case OPEN_FILE_TABLE:
       return getBucketKeyPrefixFSO(volume, bucket);
     default:
+      LOG.warn("Unknown table name '{}' passed to getTableBucketPrefix (volume='{}', bucket='{}'). " +
+          "Returning empty string.", tableName, volume, bucket);
       return "";
     }
   }

From 9d4dd269283a6aabdc8d613370d48b64dcfeda52 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 11 Nov 2025 09:18:05 -0500
Subject: [PATCH 106/126] HDDS-13867.
Fix Snapshot dag diff exception handling

Change-Id: I450d29c5871cc8bb84a067651c6802b74bcde8bd
---
 .../ozone/om/snapshot/SnapshotDiffManager.java | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 78af0d9dd9d2..1c7258d74cbf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -1193,16 +1193,14 @@ Set getDeltaFiles(OmSnapshot fromSnapshot,
       recordActivity(jobKey, SST_FILE_DELTA_DAG_WALK);
       LOG.debug("Calling RocksDBCheckpointDiffer");
-      try {
-        final Map versionMap = toSnapshotLocalData.getVersionSstFileInfos().entrySet()
-            .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getPreviousSnapshotVersion()));
-        deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, versionMap, tablePrefixInfo, tablesToLookUp,
-            diffDir).map(HashSet::new);
-      } catch (Exception exception) {
-        recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF);
-        LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. " +
-            "It will fallback to full diff now.", exception);
-      }
+      final Map versionMap = toSnapshotLocalData.getVersionSstFileInfos().entrySet()
+          .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getPreviousSnapshotVersion()));
+      deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, versionMap, tablePrefixInfo, tablesToLookUp,
+          diffDir).map(HashSet::new);
+    } catch (Exception exception) {
+      recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF);
+      LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. " +
+          "It will fallback to full diff now.", exception);
     }
   }

From d696ffaf04c35f7e4a03ca38d3a981cd42b3065f Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 11 Nov 2025 18:42:20 -0500
Subject: [PATCH 107/126] HDDS-13912.
Modularise Snapshot Delta file computer (Full Diff) Change-Id: If8719d9fb9ef077cc7a68db8b61ebb736b23071b --- .../org/apache/hadoop/hdds/utils/IOUtils.java | 14 + .../apache/ozone/rocksdb/util/RdbUtil.java | 6 +- .../hadoop/ozone/om/TestOMRatisSnapshots.java | 8 +- .../ozone/om/OMDBCheckpointServlet.java | 4 +- .../ozone/om/snapshot/OmSnapshotUtils.java | 14 +- .../diff/delta/DeltaFileComputer.java | 51 ++ .../diff/delta/FileLinkDeltaFileComputer.java | 139 ++++++ .../snapshot/diff/delta/FullDiffComputer.java | 117 +++++ .../om/snapshot/diff/delta/package-info.java | 21 + .../ozone/om/TestOmSnapshotManager.java | 2 +- .../om/snapshot/TestOmSnapshotUtils.java | 2 +- .../delta/TestFileLinkDeltaFileComputer.java | 448 ++++++++++++++++++ .../diff/delta/TestFullDiffComputer.java | 339 +++++++++++++ 13 files changed, 1141 insertions(+), 24 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java index ce42c9660e45..026a4d019d27 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java @@ -17,12 +17,15 @@ package org.apache.hadoop.hdds.utils; +import com.google.common.annotations.VisibleForTesting; import jakarta.annotation.Nonnull; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; import java.util.Collection; import java.util.Properties; @@ -119,4 +122,15 @@ public static void writePropertiesToFile(File file, Properties properties) throw } return props; } + + /** + * Get the INode for file. + * + * @param file File whose INode is to be retrieved. + * @return INode for file. 
+ */ + @VisibleForTesting + public static Object getINode(Path file) throws IOException { + return Files.readAttributes(file, BasicFileAttributes.class).fileKey(); + } } diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java index ac88102f800a..03efd2f696a5 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/RdbUtil.java @@ -17,11 +17,11 @@ package org.apache.ozone.rocksdb.util; +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; + import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -57,7 +57,7 @@ public static Map getSSTFilesWithInodesForComparison( Map inodeToSstMap = new HashMap<>(); for (LiveFileMetaData lfm : liveSSTFilesForCFs) { Path sstFilePath = Paths.get(lfm.path(), lfm.fileName()); - Object inode = Files.readAttributes(sstFilePath, BasicFileAttributes.class).fileKey(); + Object inode = getINode(sstFilePath); inodeToSstMap.put(inode, new SstFileInfo(lfm)); } return inodeToSstMap; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index 051addfd8ff3..9299c8193419 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -360,15 +360,15 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, } // If it is a hard link on the leader, it should be a hard // link on the follower - if (OmSnapshotUtils.getINode(leaderActiveSST) - .equals(OmSnapshotUtils.getINode(leaderSnapshotSST))) { + if (org.apache.hadoop.hdds.utils.IOUtils.getINode(leaderActiveSST) + .equals(org.apache.hadoop.hdds.utils.IOUtils.getINode(leaderSnapshotSST))) { Path followerSnapshotSST = Paths.get(followerSnapshotDir.toString(), fileName); Path followerActiveSST = Paths.get(followerActiveDir.toString(), fileName); assertEquals( - OmSnapshotUtils.getINode(followerActiveSST), - OmSnapshotUtils.getINode(followerSnapshotSST), + org.apache.hadoop.hdds.utils.IOUtils.getINode(followerActiveSST), + org.apache.hadoop.hdds.utils.IOUtils.getINode(followerSnapshotSST), "Snapshot sst file is supposed to be a hard link"); hardLinkCount++; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index efe9fc0aeea9..a24719eb5e96 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdds.utils.Archiver.includeFile; import static org.apache.hadoop.hdds.utils.Archiver.tar; import static org.apache.hadoop.hdds.utils.HddsServerUtil.includeRatisSnapshotCompleteFlag; +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; import static 
org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR; @@ -533,8 +534,7 @@ private static Path findLinkPath(Map> files, Path file) // Check if the files are hard linked to each other. // Note comparison must be done against srcPath, because // destPath may only exist on Follower. - if (OmSnapshotUtils.getINode(srcPath).equals( - OmSnapshotUtils.getINode(file))) { + if (getINode(srcPath).equals(getINode(file))) { return destPath; } else { LOG.info("Found non linked sst files with the same name: {}, {}", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java index 55132f71d5cd..728d4a8e9cea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java @@ -17,16 +17,15 @@ package org.apache.hadoop.ozone.om.snapshot; +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR; -import com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.nio.file.attribute.BasicFileAttributes; import java.nio.file.attribute.FileTime; import java.util.ArrayList; import java.util.List; @@ -60,17 +59,6 @@ public static String truncateFileName(int truncateLength, Path file) { return file.toString().substring(truncateLength); } - /** - * Get the INode for file. - * - * @param file File whose INode is to be retrieved. - * @return INode for file. - */ - @VisibleForTesting - public static Object getINode(Path file) throws IOException { - return Files.readAttributes(file, BasicFileAttributes.class).fileKey(); - } - /** * Returns a string combining the inode (fileKey) and the last modification time (mtime) of the given file. *

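The interface and base class added next give the snapshot-diff flow a pluggable source of delta SST files. A minimal caller-side sketch of the intended usage follows, under stated assumptions: newDeltaFileComputer() is a hypothetical factory standing in for however the concrete implementation (for example, the FullDiffComputer added below) is wired up, fromInfo/toInfo are already-resolved SnapshotInfo instances for the same bucket, and the table names are illustrative:

    // Sketch only. DeltaFileComputer extends Closeable, so try-with-resources
    // removes the hard-link delta directory via close().
    try (DeltaFileComputer computer = newDeltaFileComputer()) { // hypothetical factory
      Optional<Collection<Pair<Path, SstFileInfo>>> delta =
          computer.getDeltaFiles(fromInfo, toInfo,
              ImmutableSet.of("keyTable", "directoryTable"));
      // Each pair maps a hard link under the delta directory to the SST file
      // metadata it was derived from; an empty Optional means no delta was found.
      delta.ifPresent(pairs -> pairs.forEach(pair ->
          LOG.info("Delta SST {} ({})", pair.getLeft(), pair.getRight())));
    }
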
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java new file mode 100644 index 000000000000..b93dfae3c657 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Optional; +import java.util.Set; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.ozone.rocksdb.util.SstFileInfo; + +/** + * The DeltaFileComputer interface defines a contract for computing delta files + * that represent changes between two snapshots. Implementations of this + * interface are responsible for determining the modifications made from a + * baseline snapshot to a target snapshot in the form of delta files. + */ +public interface DeltaFileComputer extends Closeable { + + /** + * Retrieves the delta files representing changes between two snapshots for specified tables. + * + * @param fromSnapshot the baseline snapshot from which changes are computed + * @param toSnapshot the target snapshot to which changes are compared + * @param tablesToLookup the set of table names to consider when determining changes + * @return an {@code Optional} containing a collection of pairs, where each pair consists of a + * {@code Path} representing the delta file and an associated {@code SstFileInfo}, or + * an empty {@code Optional} if no changes are found + * @throws IOException if an I/O error occurs while retrieving delta files + */ + Optional>> getDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, + Set tablesToLookup) throws IOException; +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java new file mode 100644 index 000000000000..8ca1c8be51c9 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static java.nio.file.Files.createDirectories; +import static org.apache.commons.io.FilenameUtils.getExtension; +import static org.apache.commons.io.file.PathUtils.deleteDirectory; + +import java.io.IOException; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The {@code FileLinkDeltaFileComputer} is an abstract class that provides a + * base implementation for the {@code DeltaFileComputer} interface. It is + * responsible for computing delta files by creating hard links to the + * relevant source files in a specified delta directory, enabling a compact + * representation of changes between snapshots. + * + * This class encapsulates the logic for managing snapshots and metadata, + * creating hard links for delta representation, and reporting activity + * during the computation process. + */ +public abstract class FileLinkDeltaFileComputer implements DeltaFileComputer { + + private static final Logger LOG = LoggerFactory.getLogger(FileLinkDeltaFileComputer.class); + private final OmSnapshotManager omSnapshotManager; + private final OMMetadataManager activeMetadataManager; + private final Consumer activityReporter; + private Path deltaDir; + private AtomicInteger linkFileCounter = new AtomicInteger(0); + + FileLinkDeltaFileComputer(OmSnapshotManager snapshotManager, OMMetadataManager activeMetadataManager, + Path deltaDirPath, Consumer activityReporter) throws IOException { + this.deltaDir = deltaDirPath.toAbsolutePath(); + this.omSnapshotManager = snapshotManager; + this.activityReporter = activityReporter; + this.activeMetadataManager = activeMetadataManager; + createDirectories(deltaDir); + } + + /** + * Computes the delta files between two snapshots based on the provided parameters. + * The method determines the differences in data between the `fromSnapshot` and + * `toSnapshot` and generates a mapping of paths to pairs consisting of a resolved + * path and corresponding SST file information. 
+ * + * @param fromSnapshot the source snapshot from which changes are calculated + * @param toSnapshot the target snapshot up to which changes are calculated + * @param tablesToLookup a set of table names to filter the tables that should be considered + * @param tablePrefixInfo information about table prefixes to apply during computation + * @return an Optional containing a map where the key is the delta file path, and the value + * is a pair consisting of a resolved path and the corresponding SST file information. + * If there are no delta files, returns an empty Optional. + * @throws IOException if an I/O error occurs during the computation process + */ + abstract Optional>> computeDeltaFiles(SnapshotInfo fromSnapshot, + SnapshotInfo toSnapshot, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException; + + public Optional>> getDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, + Set tablesToLookup) throws IOException { + TablePrefixInfo tablePrefixInfo = activeMetadataManager.getTableBucketPrefix(fromSnapshot.getVolumeName(), + fromSnapshot.getBucketName()); + return computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo).map(Map::values); + } + + void updateActivity(SubStatus status) { + activityReporter.accept(status); + } + + Path createLink(Path path) throws IOException { + Path source = path.toAbsolutePath(); + Path link = deltaDir.resolve(linkFileCounter.incrementAndGet() + + "." + getExtension(source.getFileName().toString())); + try { + Files.createLink(link, source); + } catch (FileAlreadyExistsException ignored) { + LOG.debug("File for source {} already exists: at {}", source, link); + } + return link; + } + + ReadableOmSnapshotLocalDataProvider getLocalDataProvider(UUID snapshotId, UUID toResolveSnapshotId) + throws IOException { + return omSnapshotManager.getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotId, toResolveSnapshotId); + } + + UncheckedAutoCloseableSupplier getSnapshot(SnapshotInfo snapshotInfo) throws IOException { + return omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()); + } + + OMMetadataManager getActiveMetadataManager() { + return activeMetadataManager; + } + + @Override + public void close() throws IOException { + if (deltaDir == null || Files.notExists(deltaDir)) { + return; + } + deleteDirectory(deltaDir); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java new file mode 100644 index 000000000000..76e83ba36923 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static org.apache.ozone.rocksdiff.RocksDiffUtils.filterRelevantSstFiles; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * FullDiffComputer is a specialized implementation of FileLinkDeltaFileComputer + * that computes the delta files between two snapshots. It identifies the differences + * in files and generates corresponding links for easier processing of snapshot diffs. + * This implementation handles cases of optimized inode-based comparisons as well as + * fallback with full file list comparisons in case of exceptions. 
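+ * When inode attributes cannot be read, the computation degrades to comparing the
+ * plain SST file lists of the two snapshots, so a (possibly larger) delta is still produced.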
+ */ +class FullDiffComputer extends FileLinkDeltaFileComputer { + + private static final Logger LOG = LoggerFactory.getLogger(FullDiffComputer.class); + + FullDiffComputer(OmSnapshotManager snapshotManager, OMMetadataManager activeMetadataManager, Path deltaDirPath, + Consumer activityReporter) throws IOException { + super(snapshotManager, activeMetadataManager, deltaDirPath, activityReporter); + } + + @Override + Optional>> computeDeltaFiles(SnapshotInfo fromSnapshotInfo, + SnapshotInfo toSnapshotInfo, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException { + try (UncheckedAutoCloseableSupplier fromSnapHandle = getSnapshot(fromSnapshotInfo); + UncheckedAutoCloseableSupplier toSnapHandle = getSnapshot(toSnapshotInfo)) { + OmSnapshot fromSnapshot = fromSnapHandle.get(); + OmSnapshot toSnapshot = toSnapHandle.get(); + Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); + Path toSnapshotPath = toSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); + Map> paths = new HashMap<>(); + try { + Map fromSnapshotFiles = getSSTFileMapForSnapshot(fromSnapshot, tablesToLookup, + tablePrefixInfo); + Map toSnapshotFiles = getSSTFileMapForSnapshot(toSnapshot, tablesToLookup, + tablePrefixInfo); + for (Map.Entry entry : fromSnapshotFiles.entrySet()) { + if (!toSnapshotFiles.containsKey(entry.getKey())) { + Path source = entry.getValue().getFilePath(fromSnapshotPath); + paths.put(source, Pair.of(createLink(source), entry.getValue())); + } + } + for (Map.Entry entry : toSnapshotFiles.entrySet()) { + if (!fromSnapshotFiles.containsKey(entry.getKey())) { + Path source = entry.getValue().getFilePath(toSnapshotPath); + paths.put(source, Pair.of(createLink(source), entry.getValue())); + } + } + } catch (IOException e) { + // In case of exception during inode read use all files + LOG.error("Exception occurred while populating delta files for snapDiff", e); + LOG.warn("Falling back to full file list comparison, inode-based optimization skipped."); + paths.clear(); + Set fromSnapshotFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookup, tablePrefixInfo); + Set toSnapshotFiles = getSSTFileListForSnapshot(toSnapshot, tablesToLookup, tablePrefixInfo); + for (SstFileInfo sstFileInfo : fromSnapshotFiles) { + Path source = sstFileInfo.getFilePath(fromSnapshotPath); + paths.put(source, Pair.of(createLink(source), sstFileInfo)); + } + for (SstFileInfo sstFileInfo : toSnapshotFiles) { + Path source = sstFileInfo.getFilePath(toSnapshotPath); + paths.put(source, Pair.of(createLink(source), sstFileInfo)); + } + } + return Optional.of(paths); + } + } + + static Map getSSTFileMapForSnapshot(OmSnapshot snapshot, + Set tablesToLookUp, TablePrefixInfo tablePrefixInfo) throws IOException { + return filterRelevantSstFiles(RdbUtil.getSSTFilesWithInodesForComparison(((RDBStore)snapshot.getMetadataManager() + .getStore()).getDb().getManagedRocksDb(), tablesToLookUp), tablesToLookUp, tablePrefixInfo); + } + + static Set getSSTFileListForSnapshot(OmSnapshot snapshot, Set tablesToLookUp, + TablePrefixInfo tablePrefixInfo) { + return filterRelevantSstFiles(RdbUtil.getSSTFilesForComparison(((RDBStore)snapshot.getMetadataManager().getStore()) + .getDb().getManagedRocksDb(), tablesToLookUp), tablesToLookUp, tablePrefixInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/package-info.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/package-info.java new file mode 100644 index 000000000000..c398f62a9e19 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes to compute the delta files between two snapshots. + */ +package org.apache.hadoop.ozone.om.snapshot.diff.delta; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 8c5ec7e5ab45..3750c430c143 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -19,6 +19,7 @@ import static org.apache.commons.io.file.PathUtils.copyDirectory; import static org.apache.hadoop.hdds.utils.HAUtils.getExistingFiles; +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -29,7 +30,6 @@ import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java index 9b402b8dca67..38905e4dd253 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode; +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; diff 
--git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java new file mode 100644 index 000000000000..acd57a21219f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java @@ -0,0 +1,448 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +/** + * Unit tests for FileLinkDeltaFileComputer. 
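+ * Covers the delta-directory lifecycle, hard-link creation, activity reporting,
+ * provider/snapshot delegation, and cleanup on close.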
+ */ +public class TestFileLinkDeltaFileComputer { + + @TempDir + private Path tempDir; + + @Mock + private OmSnapshotManager omSnapshotManager; + + @Mock + private OMMetadataManager activeMetadataManager; + + @Mock + private OmSnapshotLocalDataManager localDataManager; + + @Mock + private Consumer activityReporter; + + private AutoCloseable mocks; + private Path deltaDirPath; + private TestableFileLinkDeltaFileComputer deltaFileComputer; + + @BeforeEach + public void setUp() throws IOException { + mocks = MockitoAnnotations.openMocks(this); + deltaDirPath = tempDir.resolve("delta"); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(localDataManager); + } + + @AfterEach + public void tearDown() throws Exception { + if (deltaFileComputer != null) { + deltaFileComputer.close(); + } + if (mocks != null) { + mocks.close(); + } + } + + /** + * Tests that the constructor creates the delta directory successfully. + */ + @Test + public void testConstructorCreatesDeltaDirectory() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertTrue(Files.exists(deltaDirPath), "Delta directory should be created"); + assertTrue(Files.isDirectory(deltaDirPath), "Delta path should be a directory"); + } + + /** + * Tests that the constructor handles an existing delta directory. + */ + @Test + public void testConstructorWithExistingDirectory() throws IOException { + Files.createDirectories(deltaDirPath); + assertTrue(Files.exists(deltaDirPath)); + + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertTrue(Files.exists(deltaDirPath), "Delta directory should exist"); + } + + /** + * Tests creating a hard link to a file. + */ + @Test + public void testCreateLink() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + // Create a source file + Path sourceFile = tempDir.resolve("source.sst"); + Files.createFile(sourceFile); + Files.write(sourceFile, "test data".getBytes()); + + // Create a hard link + Path linkPath = deltaFileComputer.createLink(sourceFile); + + assertNotNull(linkPath, "Link path should not be null"); + assertTrue(Files.exists(linkPath), "Link should be created"); + assertTrue(linkPath.getFileName().toString().endsWith(".sst"), "Link should preserve file extension"); + assertEquals("test data", new String(Files.readAllBytes(linkPath)), "Link should point to same data"); + } + + /** + * Tests creating multiple hard links increments the counter. + */ + @Test + public void testCreateMultipleLinks() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + // Create multiple source files + List sourceFiles = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + Path sourceFile = tempDir.resolve("source" + i + ".sst"); + Files.createFile(sourceFile); + sourceFiles.add(sourceFile); + } + + // Create hard links + Set linkNames = new HashSet<>(); + for (Path sourceFile : sourceFiles) { + Path linkPath = deltaFileComputer.createLink(sourceFile); + linkNames.add(linkPath.getFileName().toString()); + } + + assertEquals(5, linkNames.size(), "All links should have unique names"); + } + + /** + * Tests creating a link when the link already exists (concurrent scenario). 
+ */ + @Test + public void testCreateLinkWhenLinkExists() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + // Create a source file + Path sourceFile = tempDir.resolve("source.sst"); + Files.createFile(sourceFile); + + // Create first link + Path firstLink = deltaFileComputer.createLink(sourceFile); + assertTrue(Files.exists(firstLink)); + + // Manually create the next link file to simulate concurrent creation + Path expectedNextLink = deltaDirPath.resolve("2.sst"); + Files.createFile(expectedNextLink); + + // Try to create another link - it should handle the FileAlreadyExistsException + Path secondLink = deltaFileComputer.createLink(sourceFile); + assertEquals(expectedNextLink, secondLink); + } + + /** + * Tests the updateActivity method calls the activity reporter. + */ + @Test + public void testUpdateActivity() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + SubStatus status = SubStatus.SST_FILE_DELTA_DAG_WALK; + deltaFileComputer.updateActivity(status); + + verify(activityReporter, times(1)).accept(status); + } + + /** + * Tests the updateActivity method with multiple status updates. + */ + @Test + public void testMultipleActivityUpdates() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + SubStatus[] statuses = {SubStatus.SST_FILE_DELTA_DAG_WALK, SubStatus.SST_FILE_DELTA_FULL_DIFF, + SubStatus.DIFF_REPORT_GEN}; + for (SubStatus status : statuses) { + deltaFileComputer.updateActivity(status); + } + + ArgumentCaptor captor = ArgumentCaptor.forClass(SubStatus.class); + verify(activityReporter, times(3)).accept(captor.capture()); + assertEquals(3, captor.getAllValues().size()); + } + + /** + * Tests the close method deletes the delta directory. + */ + @Test + public void testCloseDeletesDeltaDirectory() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertTrue(Files.exists(deltaDirPath), "Delta directory should exist before close"); + + deltaFileComputer.close(); + + assertFalse(Files.exists(deltaDirPath), "Delta directory should be deleted after close"); + } + + /** + * Tests close when delta directory doesn't exist. + */ + @Test + public void testCloseWithNonExistentDirectory() throws IOException { + Path nonExistentPath = tempDir.resolve("nonexistent"); + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + nonExistentPath, activityReporter); + + // Delete the directory + Files.deleteIfExists(nonExistentPath); + + // Close should not throw an exception + deltaFileComputer.close(); + } + + /** + * Tests close deletes directory with files in it. 
+ */ + @Test + public void testCloseDeletesDirectoryWithFiles() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + // Create source files and links + for (int i = 0; i < 3; i++) { + Path sourceFile = tempDir.resolve("source" + i + ".sst"); + Files.createFile(sourceFile); + deltaFileComputer.createLink(sourceFile); + } + + assertTrue(Files.list(deltaDirPath).count() > 0, "Delta directory should contain files"); + + deltaFileComputer.close(); + + assertFalse(Files.exists(deltaDirPath), "Delta directory with files should be deleted"); + } + + /** + * Tests getLocalDataProvider delegates to snapshot manager. + */ + @Test + public void testGetLocalDataProvider() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID snapshotId = UUID.randomUUID(); + UUID toResolveId = UUID.randomUUID(); + ReadableOmSnapshotLocalDataProvider mockProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + + when(localDataManager.getOmSnapshotLocalData(snapshotId, toResolveId)).thenReturn(mockProvider); + + ReadableOmSnapshotLocalDataProvider result = deltaFileComputer.getLocalDataProvider(snapshotId, toResolveId); + + assertEquals(mockProvider, result); + verify(localDataManager, times(1)).getOmSnapshotLocalData(snapshotId, toResolveId); + } + + /** + * Tests getSnapshot delegates to snapshot manager. + */ + @Test + public void testGetSnapshot() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + SnapshotInfo snapshotInfo = createMockSnapshotInfo("vol1", "bucket1", "snap1"); + @SuppressWarnings("unchecked") + UncheckedAutoCloseableSupplier mockSnapshot = mock(UncheckedAutoCloseableSupplier.class); + + when(omSnapshotManager.getActiveSnapshot("vol1", "bucket1", "snap1")).thenReturn(mockSnapshot); + + UncheckedAutoCloseableSupplier result = deltaFileComputer.getSnapshot(snapshotInfo); + + assertEquals(mockSnapshot, result); + verify(omSnapshotManager, times(1)).getActiveSnapshot("vol1", "bucket1", "snap1"); + } + + /** + * Tests getActiveMetadataManager returns the correct instance. + */ + @Test + public void testGetActiveMetadataManager() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + OMMetadataManager result = deltaFileComputer.getActiveMetadataManager(); + + assertEquals(activeMetadataManager, result); + } + + /** + * Tests getDeltaFiles method invokes computeDeltaFiles correctly. 
+ */ + @Test + public void testGetDeltaFiles() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1"); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2"); + Set tablesToLookup = ImmutableSet.of("keyTable", "fileTable"); + + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + when(activeMetadataManager.getTableBucketPrefix("vol1", "bucket1")).thenReturn(tablePrefixInfo); + + // Set up the test implementation to return some delta files + Map> deltaMap = new HashMap<>(); + Path sstPath = tempDir.resolve("test.sst"); + Files.createFile(sstPath); + SstFileInfo sstFileInfo = mock(SstFileInfo.class); + deltaMap.put(deltaDirPath.resolve("1.sst"), Pair.of(sstPath, sstFileInfo)); + + deltaFileComputer.setComputeDeltaFilesResult(Optional.of(deltaMap)); + + Optional>> result = + deltaFileComputer.getDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup); + + assertTrue(result.isPresent(), "Result should be present"); + assertEquals(1, result.get().size(), "Should have one delta file"); + verify(activeMetadataManager, times(1)).getTableBucketPrefix("vol1", "bucket1"); + } + + /** + * Tests getDeltaFiles when computeDeltaFiles returns empty. + */ + @Test + public void testGetDeltaFilesReturnsEmpty() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1"); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2"); + Set tablesToLookup = ImmutableSet.of("keyTable"); + + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + when(activeMetadataManager.getTableBucketPrefix("vol1", "bucket1")).thenReturn(tablePrefixInfo); + + deltaFileComputer.setComputeDeltaFilesResult(Optional.empty()); + + Optional>> result = + deltaFileComputer.getDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup); + + assertFalse(result.isPresent(), "Result should be empty"); + } + + /** + * Tests that links preserve file extensions correctly. + */ + @Test + public void testLinkPreservesFileExtension() throws IOException { + deltaFileComputer = new TestableFileLinkDeltaFileComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + String[] extensions = {"sst", "txt", "log", "data"}; + for (String ext : extensions) { + Path sourceFile = tempDir.resolve("source." + ext); + Files.createFile(sourceFile); + + Path linkPath = deltaFileComputer.createLink(sourceFile); + + assertTrue(linkPath.getFileName().toString().endsWith("." + ext), + "Link should preserve extension: " + ext); + } + } + + // Helper methods + + private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName, String snapshotName) { + SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setName(snapshotName) + .setSnapshotId(UUID.randomUUID()); + return builder.build(); + } + + /** + * Concrete implementation of FileLinkDeltaFileComputer for testing. 
+ */ + private static class TestableFileLinkDeltaFileComputer extends FileLinkDeltaFileComputer { + + private Optional>> computeDeltaFilesResult = Optional.empty(); + + TestableFileLinkDeltaFileComputer(OmSnapshotManager snapshotManager, OMMetadataManager activeMetadataManager, + Path deltaDirPath, Consumer activityReporter) throws IOException { + super(snapshotManager, activeMetadataManager, deltaDirPath, activityReporter); + } + + @Override + Optional>> computeDeltaFiles(SnapshotInfo fromSnapshot, + SnapshotInfo toSnapshot, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException { + return computeDeltaFilesResult; + } + + void setComputeDeltaFilesResult(Optional>> result) { + this.computeDeltaFilesResult = result; + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java new file mode 100644 index 000000000000..adc96dc99622 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java @@ -0,0 +1,339 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.rocksdb.LiveFileMetaData; +import org.rocksdb.RocksDB; + +/** + * Unit tests for FullDiffComputer. + */ +public class TestFullDiffComputer { + + @Mock + private OmSnapshotManager omSnapshotManager; + + @Mock + private OMMetadataManager activeMetadataManager; + + @Mock + private OmSnapshotLocalDataManager localDataManager; + + @Mock + private Consumer activityReporter; + + @TempDir + private Path tempDir; + + private Path deltaDirPath; + + private AutoCloseable mocks; + private FullDiffComputer fullDiffComputer; + + @BeforeEach + public void setUp() throws IOException { + mocks = MockitoAnnotations.openMocks(this); + deltaDirPath = tempDir.resolve("delta"); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(localDataManager); + } + + @AfterEach + public void tearDown() throws Exception { + if (fullDiffComputer != null) { + fullDiffComputer.close(); + } + if (mocks != null) { + mocks.close(); + } + } + + /** + * Tests that the constructor creates a FullDiffComputer successfully. 
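The setUp/tearDown pair above follows the standard lifecycle for annotation-driven Mockito mocks: MockitoAnnotations.openMocks(this) populates the @Mock fields and returns an AutoCloseable that should be closed after each test so mock state does not leak between tests. A stripped-down sketch of just that lifecycle (ExampleTest and the Runnable mock are illustrative, not part of the patch):

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.mockito.Mock;
    import org.mockito.MockitoAnnotations;

    class ExampleTest {
      @Mock
      private Runnable task;        // initialized by openMocks

      private AutoCloseable mocks;  // handle used to release the mocks

      @BeforeEach
      void setUp() {
        mocks = MockitoAnnotations.openMocks(this);
      }

      @AfterEach
      void tearDown() throws Exception {
        mocks.close();
      }

      @Test
      void recordsInteractions() {
        task.run();                 // recorded by the mock, no real work done
      }
    }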
+ */ + @Test + public void testConstructor() throws IOException { + fullDiffComputer = new FullDiffComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertNotNull(fullDiffComputer, "FullDiffComputer should be created"); + assertTrue(Files.exists(deltaDirPath), "Delta directory should be created"); + } + + public static Stream computeDeltaFileCases() { + return Stream.of( + Arguments.of("Delta File with same source and target", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ad", "ag", "cf1"), 2), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2), + ImmutableMap.of("cf1", "a", "cf2", "z"), Collections.emptyMap(), ImmutableSet.of("cf1")), + Arguments.of("Delta File with source having more files", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ad", "ag", "cf1"), 2, + new SstFileInfo("3", "af", "ah", "cf1"), 3), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableMap.of(Paths.get("snap1").resolve("3.sst"), new SstFileInfo("3", "af", "ah", "cf1")), + ImmutableSet.of("cf1")), + Arguments.of("Delta File with target having more files", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ad", "ag", "cf1"), 2), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2, + new SstFileInfo("2", "af", "ah", "cf1"), 3), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableMap.of(Paths.get("snap2").resolve("2.sst"), new SstFileInfo("2", "af", "ah", "cf1")), + ImmutableSet.of("cf1")), + Arguments.of("Delta File computation with source files with invalid prefix", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "bh", "bi", "cf1"), 2), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableMap.of(Paths.get("snap2").resolve("4.sst"), new SstFileInfo("4", "af", "ai", "cf1")), + ImmutableSet.of("cf1")), + Arguments.of("Delta File computation with target files with invalid prefix", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ah", "ai", "cf1"), 2), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "bf", "bi", "cf1"), 2), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableMap.of(Paths.get("snap1").resolve("2.sst"), new SstFileInfo("2", "ah", "ai", "cf1")), + ImmutableSet.of("cf1")), + Arguments.of("Delta File computation with target files with multiple tables", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ah", "ai", "cf1"), 2, + new SstFileInfo("3", "ah", "ai", "cf3"), 3), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2, + new SstFileInfo("5", "af", "ai", "cf4"), 5), + ImmutableMap.of("cf1", "a", "cf2", "z"), + Collections.emptyMap(), ImmutableSet.of("cf1")), + Arguments.of("Delta File computation with target files with multiple tables to lookup on source", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ah", "ai", "cf1"), 2, + new SstFileInfo("3", "ah", "ai", "cf3"), 3), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2, + new 
SstFileInfo("5", "af", "ai", "cf4"), 5), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableMap.of(Paths.get("snap1").resolve("3.sst"), new SstFileInfo("3", "ah", "ai", "cf3")), + ImmutableSet.of("cf1", "cf3")), + Arguments.of("Delta File computation with target files with multiple tables to lookup on target", + ImmutableMap.of(new SstFileInfo("1", "ac", "ae", "cf1"), 1, + new SstFileInfo("2", "ah", "ai", "cf1"), 2, + new SstFileInfo("3", "ah", "ai", "cf3"), 3), + ImmutableMap.of(new SstFileInfo("3", "ah", "ak", "cf1"), 1, + new SstFileInfo("4", "af", "ai", "cf1"), 2, + new SstFileInfo("5", "af", "ai", "cf4"), 5), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableMap.of(Paths.get("snap2").resolve("5.sst"), new SstFileInfo("5", "af", "ai", "cf4")), + ImmutableSet.of("cf1", "cf4")) + ); + } + + @ParameterizedTest + @MethodSource("computeDeltaFileCases") + public void testComputeDeltaFiles(String description, + Map sourceSnapshotFiles, Map targetSnapshotFiles, + Map tablePrefixMap, Map expectedDiffFile, + Set tablesToLookup) throws IOException { + fullDiffComputer = new FullDiffComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + File sstFileDirPath = tempDir.resolve("sstFiles").toFile(); + sstFileDirPath.mkdirs(); + Map paths = Stream.concat(sourceSnapshotFiles.values().stream(), + targetSnapshotFiles.values().stream()) + .distinct().collect(Collectors.toMap(Function.identity(), i -> { + // Create mock SST files + try { + Path sstFilePath = sstFileDirPath.toPath().resolve(UUID.randomUUID() + ".sst").toAbsolutePath(); + sstFilePath.toFile().createNewFile(); + return sstFilePath; + } catch (IOException e) { + throw new RuntimeException(e); + } + })); + + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1"); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2"); + Path snapDirectory = tempDir.resolve("snaps"); + OmSnapshot fromSnap = createMockSnapshot(snapDirectory, fromSnapshot, + sourceSnapshotFiles.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, + entry -> paths.get(entry.getValue())))); + OmSnapshot toSnap = createMockSnapshot(snapDirectory, toSnapshot, + targetSnapshotFiles.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, + entry -> paths.get(entry.getValue())))); + + @SuppressWarnings("unchecked") + UncheckedAutoCloseableSupplier fromHandle = mock(UncheckedAutoCloseableSupplier.class); + @SuppressWarnings("unchecked") + UncheckedAutoCloseableSupplier toHandle = mock(UncheckedAutoCloseableSupplier.class); + + when(fromHandle.get()).thenReturn(fromSnap); + when(toHandle.get()).thenReturn(toSnap); + when(omSnapshotManager.getActiveSnapshot("vol1", "bucket1", "snap1")).thenReturn(fromHandle); + when(omSnapshotManager.getActiveSnapshot("vol1", "bucket1", "snap2")).thenReturn(toHandle); + + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(tablePrefixMap); + + Map result = fullDiffComputer.computeDeltaFiles(fromSnapshot, toSnapshot, + tablesToLookup, tablePrefixInfo).orElse(Collections.emptyMap()).entrySet() + .stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); + when(activeMetadataManager.getTableBucketPrefix("vol1", "bucket1")).thenReturn(tablePrefixInfo); + assertEquals(expectedDiffFile.entrySet().stream().collect( + Collectors.toMap(entry -> snapDirectory.resolve(entry.getKey()), Map.Entry::getValue)), + result); + + Set iNodes = fullDiffComputer.getDeltaFiles(fromSnapshot, toSnapshot, + 
tablesToLookup).orElse(Collections.emptyList()).stream() + .map(Pair::getKey).map(path -> { + try { + return getINode(path); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toSet()); + Set expectedInodes = result.keySet().stream().map(path -> { + try { + return getINode(path); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).collect(Collectors.toSet()); + assertEquals(expectedInodes, iNodes); + } + + /** + * Tests that close properly cleans up resources. + */ + @Test + public void testClose() throws IOException { + fullDiffComputer = new FullDiffComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertTrue(Files.exists(deltaDirPath), "Delta directory should exist"); + + fullDiffComputer.close(); + + assertFalse(Files.exists(deltaDirPath), "Delta directory should be cleaned up after close"); + } + + // Helper methods + private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName, String snapshotName) { + SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setName(snapshotName) + .setSnapshotId(UUID.randomUUID()); + return builder.build(); + } + + private LiveFileMetaData getMockLiveFileMetaData(Path dbLocation, SstFileInfo sstFileInfo) { + LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); + String path = sstFileInfo.getFilePath(dbLocation).getParent().toAbsolutePath().toString(); + when(liveFileMetaData.fileName()).thenReturn(sstFileInfo.getFilePath(dbLocation).getFileName().toString()); + when(liveFileMetaData.path()).thenReturn(path); + when(liveFileMetaData.columnFamilyName()).thenReturn(StringUtils.string2Bytes(sstFileInfo.getColumnFamily())); + when(liveFileMetaData.smallestKey()).thenReturn(StringUtils.string2Bytes(sstFileInfo.getStartKey())); + when(liveFileMetaData.largestKey()).thenReturn(StringUtils.string2Bytes(sstFileInfo.getEndKey())); + + return liveFileMetaData; + } + + private OmSnapshot createMockSnapshot(Path snapshotDir, SnapshotInfo snapshotInfo, + Map sstFilesLinks) throws IOException { + OmSnapshot snapshot = mock(OmSnapshot.class); + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + RDBStore store = mock(RDBStore.class); + RocksDatabase database = mock(RocksDatabase.class); + when(store.getDb()).thenReturn(database); + ManagedRocksDB managedRocksDB = mock(ManagedRocksDB.class); + when(database.getManagedRocksDb()).thenReturn(managedRocksDB); + RocksDB rocksDB = mock(RocksDB.class); + when(managedRocksDB.get()).thenReturn(rocksDB); + + File dbLocation = snapshotDir.resolve(snapshotInfo.getName()).toFile(); + List liveFileMetaDataList = sstFilesLinks.keySet().stream() + .map(sstFileInfo -> getMockLiveFileMetaData(dbLocation.toPath(), sstFileInfo)) + .collect(Collectors.toList()); + when(rocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDataList); + dbLocation.mkdirs(); + Map sstFilesPaths = new HashMap<>(); + for (Map.Entry sstFile : sstFilesLinks.entrySet()) { + File path = sstFile.getKey().getFilePath(dbLocation.toPath()).toFile(); + Files.createLink(path.toPath(), sstFile.getValue()); + sstFilesPaths.put(sstFile.getKey(), path.toPath()); + } + when(snapshot.getMetadataManager()).thenReturn(metadataManager); + when(metadataManager.getStore()).thenReturn(store); + when(store.getDbLocation()).thenReturn(dbLocation); + + return snapshot; + } +} From 8523deabc6ecdfa99511d9122f42f2e7daaa2402 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran 
Date: Wed, 12 Nov 2025 09:48:53 -0500 Subject: [PATCH 108/126] HDDS-13912. Fix create link logic Change-Id: Iaad718f0c7bc23c67650144c0c7ac26f0060a437 --- .../diff/delta/FileLinkDeltaFileComputer.java | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java index 8ca1c8be51c9..d456b48c0900 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java @@ -105,13 +105,20 @@ void updateActivity(SubStatus status) { Path createLink(Path path) throws IOException { Path source = path.toAbsolutePath(); - Path link = deltaDir.resolve(linkFileCounter.incrementAndGet() + - "." + getExtension(source.getFileName().toString())); - try { - Files.createLink(link, source); - } catch (FileAlreadyExistsException ignored) { - LOG.debug("File for source {} already exists: at {}", source, link); - } + Path link; + boolean createdLink = false; + do { + link = deltaDir.resolve(linkFileCounter.incrementAndGet() + + "." + getExtension(source.getFileName().toString())); + try { + Files.createLink(link, source); + createdLink = true; + } catch (FileAlreadyExistsException ignored) { + LOG.info("File for source {} already exists: at {}. Will attempt to create link with a different path", source, + link); + } + + } while (!createdLink); return link; } From 9ee02983cab55191c99727c1e86d60a5ae857e8b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 12 Nov 2025 13:37:29 -0500 Subject: [PATCH 109/126] HDDS-13912. 
Fix findbugs Change-Id: Id2be646eeb0aa1d264211bf2950803bcb2839ce9 --- .../ozone/rocksdb/util/SstFileInfo.java | 2 +- .../diff/delta/FileLinkDeltaFileComputer.java | 12 ++++++++--- .../delta/TestFileLinkDeltaFileComputer.java | 8 +++++--- .../diff/delta/TestFullDiffComputer.java | 20 +++++++++---------- 4 files changed, 25 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java index bc4975f98680..83d871a9f28a 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileInfo.java @@ -92,7 +92,7 @@ public int hashCode() { } public Path getFilePath(Path directoryPath) { - return directoryPath.resolve(fileName + SST_FILE_EXTENSION); + return directoryPath.resolve(getFileName() + SST_FILE_EXTENSION); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java index d456b48c0900..8e9994c6abd7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java @@ -32,6 +32,7 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -92,6 +93,7 @@ public abstract class FileLinkDeltaFileComputer implements DeltaFileComputer { abstract Optional>> computeDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException; + @Override public Optional>> getDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, Set tablesToLookup) throws IOException { TablePrefixInfo tablePrefixInfo = activeMetadataManager.getTableBucketPrefix(fromSnapshot.getVolumeName(), @@ -107,9 +109,14 @@ Path createLink(Path path) throws IOException { Path source = path.toAbsolutePath(); Path link; boolean createdLink = false; + Path fileName = source.getFileName(); + if (source.getFileName() == null) { + throw new IOException("Unable to create link for path " + source + " since it has no file name"); + } + String extension = getExtension(fileName.toString()); + extension = StringUtils.isBlank(extension) ? "" : ("." + extension); do { - link = deltaDir.resolve(linkFileCounter.incrementAndGet() + - "." + getExtension(source.getFileName().toString())); + link = deltaDir.resolve(linkFileCounter.incrementAndGet() + extension); try { Files.createLink(link, source); createdLink = true; @@ -117,7 +124,6 @@ Path createLink(Path path) throws IOException { LOG.info("File for source {} already exists: at {}. 
Will attempt to create link with a different path", source, link); } - } while (!createdLink); return link; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java index acd57a21219f..44d16d25c8dc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om.snapshot.diff.delta; +import static org.apache.hadoop.hdds.StringUtils.bytes2String; +import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -136,7 +138,7 @@ public void testCreateLink() throws IOException { // Create a source file Path sourceFile = tempDir.resolve("source.sst"); Files.createFile(sourceFile); - Files.write(sourceFile, "test data".getBytes()); + Files.write(sourceFile, string2Bytes("test data")); // Create a hard link Path linkPath = deltaFileComputer.createLink(sourceFile); @@ -144,7 +146,7 @@ public void testCreateLink() throws IOException { assertNotNull(linkPath, "Link path should not be null"); assertTrue(Files.exists(linkPath), "Link should be created"); assertTrue(linkPath.getFileName().toString().endsWith(".sst"), "Link should preserve file extension"); - assertEquals("test data", new String(Files.readAllBytes(linkPath)), "Link should point to same data"); + assertEquals("test data", bytes2String(Files.readAllBytes(linkPath)), "Link should point to same data"); } /** @@ -167,7 +169,7 @@ public void testCreateMultipleLinks() throws IOException { Set linkNames = new HashSet<>(); for (Path sourceFile : sourceFiles) { Path linkPath = deltaFileComputer.createLink(sourceFile); - linkNames.add(linkPath.getFileName().toString()); + linkNames.add(Optional.ofNullable(linkPath.getFileName()).map(Path::toString).orElse("null")); } assertEquals(5, linkNames.size(), "All links should have unique names"); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java index adc96dc99622..faab2028d97a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java @@ -33,7 +33,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -206,14 +205,14 @@ public void testComputeDeltaFiles(String description, deltaDirPath, activityReporter); File sstFileDirPath = tempDir.resolve("sstFiles").toFile(); - sstFileDirPath.mkdirs(); + assertTrue(sstFileDirPath.mkdirs() || sstFileDirPath.exists()); Map paths = Stream.concat(sourceSnapshotFiles.values().stream(), targetSnapshotFiles.values().stream()) .distinct().collect(Collectors.toMap(Function.identity(), i -> { // Create mock SST files try { Path sstFilePath = 
sstFileDirPath.toPath().resolve(UUID.randomUUID() + ".sst").toAbsolutePath(); - sstFilePath.toFile().createNewFile(); + assertTrue(sstFilePath.toFile().createNewFile() || sstFilePath.toFile().exists()); return sstFilePath; } catch (IOException e) { throw new RuntimeException(e); @@ -296,8 +295,9 @@ private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName private LiveFileMetaData getMockLiveFileMetaData(Path dbLocation, SstFileInfo sstFileInfo) { LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); - String path = sstFileInfo.getFilePath(dbLocation).getParent().toAbsolutePath().toString(); - when(liveFileMetaData.fileName()).thenReturn(sstFileInfo.getFilePath(dbLocation).getFileName().toString()); + String path = dbLocation.toAbsolutePath().toString(); + String fileName = sstFileInfo.getFilePath(dbLocation).toFile().getName(); + when(liveFileMetaData.fileName()).thenReturn(fileName); when(liveFileMetaData.path()).thenReturn(path); when(liveFileMetaData.columnFamilyName()).thenReturn(StringUtils.string2Bytes(sstFileInfo.getColumnFamily())); when(liveFileMetaData.smallestKey()).thenReturn(StringUtils.string2Bytes(sstFileInfo.getStartKey())); @@ -318,17 +318,17 @@ private OmSnapshot createMockSnapshot(Path snapshotDir, SnapshotInfo snapshotInf RocksDB rocksDB = mock(RocksDB.class); when(managedRocksDB.get()).thenReturn(rocksDB); - File dbLocation = snapshotDir.resolve(snapshotInfo.getName()).toFile(); + Path dbLocationPath = snapshotDir.resolve(snapshotInfo.getName()); + File dbLocation = dbLocationPath.toFile(); List liveFileMetaDataList = sstFilesLinks.keySet().stream() - .map(sstFileInfo -> getMockLiveFileMetaData(dbLocation.toPath(), sstFileInfo)) + .map(sstFileInfo -> getMockLiveFileMetaData(dbLocationPath, sstFileInfo)) .collect(Collectors.toList()); when(rocksDB.getLiveFilesMetaData()).thenReturn(liveFileMetaDataList); - dbLocation.mkdirs(); - Map sstFilesPaths = new HashMap<>(); + assertTrue(dbLocation.mkdirs() || dbLocation.exists()); + for (Map.Entry sstFile : sstFilesLinks.entrySet()) { File path = sstFile.getKey().getFilePath(dbLocation.toPath()).toFile(); Files.createLink(path.toPath(), sstFile.getValue()); - sstFilesPaths.put(sstFile.getKey(), path.toPath()); } when(snapshot.getMetadataManager()).thenReturn(metadataManager); when(metadataManager.getStore()).thenReturn(store); From 973624738f5ab80e854ef52827559f6afa624844 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 13 Nov 2025 16:16:19 -0500 Subject: [PATCH 110/126] HDDS-13867. 
Fix bug Change-Id: I1d7aded575ac6daaa5f78b3be9f1821d94cb24c6 --- .../org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index b55fa6552eec..d170ff0eace7 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -829,7 +829,7 @@ public synchronized Optional> getSSTDiffList(DifferSnapshotVer internalGetSSTDiffList(src, dest, fwdDAGSameFiles, fwdDAGDifferentFiles); } else { Set srcSstFileInfos = new HashSet<>(src.getSstFileMap().values()); - Set destSstFileInfos = new HashSet<>(src.getSstFileMap().values()); + Set destSstFileInfos = new HashSet<>(dest.getSstFileMap().values()); for (SstFileInfo srcSstFileInfo : srcSstFileInfos) { if (destSstFileInfos.contains(srcSstFileInfo)) { fwdDAGSameFiles.put(srcSstFileInfo.getFileName(), srcSstFileInfo); From 2167b031be068b0b238872d0a4386a4093e21350 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 13 Nov 2025 22:34:26 -0500 Subject: [PATCH 111/126] HDDS-13867. Add test Change-Id: Iba0dd30952d17d11eae900213437aa4c28564448 --- .../TestRocksDBCheckpointDiffer.java | 110 ++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index 98a35023b023..ec2072b638b8 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -48,6 +48,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.graph.MutableGraph; @@ -441,6 +442,115 @@ private static DifferSnapshotInfo mockDifferSnapshotVersion(String dbPath, long return differSnapshotInfo; } + private static Stream getSSTDiffListWithoutCompactionDAGCase() { + return Stream.of( + Arguments.of("Delta File with same source and target", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ad", "ag", "cf1")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ad", "ag", "cf1")), + ImmutableMap.of("cf1", "a", "cf2", "z"), ImmutableSet.of("cf1"), Collections.emptyList()), + Arguments.of("Delta File with source having more files", + ImmutableList.of( + new SstFileInfo("2", "ad", "ag", "cf1"), + new SstFileInfo("3", "af", "ah", "cf1")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ad", "ag", "cf1"), + new SstFileInfo("3", "af", "ah", "cf1")), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableSet.of("cf1"), + ImmutableList.of(new SstFileInfo("1", "ac", "ae", "cf1"))), + Arguments.of("Delta File with target having more files", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ad", "ag", "cf1"), 
+ new SstFileInfo("3", "af", "ah", "cf1")), + ImmutableList.of( + new SstFileInfo("2", "ad", "ag", "cf1"), + new SstFileInfo("3", "af", "ah", "cf1")), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableSet.of("cf1"), + ImmutableList.of(new SstFileInfo("1", "ac", "ae", "cf1"))), + Arguments.of("Delta File computation with source files with invalid prefix", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "bh", "bi", "cf1")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("4", "af", "ai", "cf1")), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableSet.of("cf1"), + ImmutableList.of(new SstFileInfo("4", "af", "ai", "cf1"))), + Arguments.of("Delta File computation with target files with invalid prefix", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("4", "bf", "bi", "cf1")), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableSet.of("cf1"), + ImmutableList.of(new SstFileInfo("2", "ah", "ai", "cf1"))), + Arguments.of("Delta File computation with target files with multiple tables", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1"), + new SstFileInfo("3", "ah", "ai", "cf3")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1"), + new SstFileInfo("5", "af", "ai", "cf4")), + ImmutableMap.of("cf1", "a", "cf2", "z"), ImmutableSet.of("cf1"), Collections.emptyList()), + Arguments.of("Delta File computation with target files with multiple tables to lookup on source", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1"), + new SstFileInfo("3", "ah", "ai", "cf3")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1"), + new SstFileInfo("5", "af", "ai", "cf4")), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableSet.of("cf1", "cf3"), + ImmutableList.of(new SstFileInfo("3", "ah", "ai", "cf3"))), + Arguments.of("Delta File computation with target files with multiple tables to lookup on target", + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1"), + new SstFileInfo("3", "ah", "ai", "cf3")), + ImmutableList.of( + new SstFileInfo("1", "ac", "ae", "cf1"), + new SstFileInfo("2", "ah", "ai", "cf1"), + new SstFileInfo("5", "af", "ai", "cf4")), + ImmutableMap.of("cf1", "a", "cf2", "z"), + ImmutableSet.of("cf1", "cf4"), + ImmutableList.of(new SstFileInfo("5", "af", "ai", "cf4"))) + ); + } + + private DifferSnapshotInfo getDifferSnapshotInfoForVersion(List sstFiles, int version) { + TreeMap> sourceSstFileMap = new TreeMap<>(); + sourceSstFileMap.put(version, sstFiles); + return new DifferSnapshotInfo(v -> Paths.get("src"), UUID.randomUUID(), 0, sourceSstFileMap); + } + + @ParameterizedTest + @MethodSource("getSSTDiffListWithoutCompactionDAGCase") + public void testGetSSTDiffListWithoutCompactionDag(String description, List sourceSstFiles, + List destSstFiles, Map prefixMap, Set tablesToLookup, + List expectedDiffList) { + DifferSnapshotInfo sourceDSI = getDifferSnapshotInfoForVersion(sourceSstFiles, 0); + DifferSnapshotVersion sourceVersion = new DifferSnapshotVersion(sourceDSI, 0, tablesToLookup); + DifferSnapshotInfo destDSI = getDifferSnapshotInfoForVersion(destSstFiles, 1); + DifferSnapshotVersion destVersion = new 
DifferSnapshotVersion(destDSI, 1, tablesToLookup); + List diffList = rocksDBCheckpointDiffer.getSSTDiffList(sourceVersion, destVersion, + new TablePrefixInfo(prefixMap), tablesToLookup, false).orElse(null); + assertEquals(expectedDiffList, diffList); + } + /** * Test cases for testGetSSTDiffListWithoutDB. */ From 08156e4ad5b0ad78828e292221ad6e624e14687b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 13 Nov 2025 22:52:05 -0500 Subject: [PATCH 112/126] HDDS-13867. Address review comments Change-Id: Idba640c859ed4906842f9fafebf04825409053ef --- .../apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java | 2 ++ .../ozone/rocksdiff/TestRocksDBCheckpointDiffer.java | 1 - .../hadoop/ozone/om/snapshot/SnapshotDiffManager.java | 8 ++++---- .../hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index d170ff0eace7..c9c4a6602d9a 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -770,6 +770,7 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, Path... dbPath * * @param src source snapshot * @param dest destination snapshot + * @param versionMap version map containing the connection between source snapshot version and dest snapshot version. * @param tablesToLookup tablesToLookup set of table (column family) names used to restrict which SST files to return. * @param sstFilesDirForSnapDiffJob dir to create hardlinks for SST files * for snapDiff job. @@ -812,6 +813,7 @@ public synchronized Optional> getSSTDiffListWithFullPath(DifferSnap * * @param src source snapshot * @param dest destination snapshot + * @param prefixInfo TablePrefixInfo to filter irrelevant SST files; can be null. * @param tablesToLookup tablesToLookup Set of column-family (table) names to include when reading SST files; * must be non-null. * @return A list of SST files without extension. e.g. 
["000050", "000060"] diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java index ec2072b638b8..8af38c5454b6 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java @@ -934,7 +934,6 @@ public void testGetSSTDiffListWithoutDB(String description, .map(i -> i.stream().map(SstFileInfo::getFileName).sorted().collect(Collectors.toList())).orElse(null)); } catch (RuntimeException rtEx) { if (!expectingException) { - rtEx.printStackTrace(); fail("Unexpected exception thrown in test."); } else { exceptionThrown = true; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 1c7258d74cbf..219fc01f0a56 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -379,7 +379,7 @@ private static DifferSnapshotInfo getDSIFromSI(OMMetadataManager activeOmMetadat } @VisibleForTesting - protected Set getSSTFileListForSnapshot(OmSnapshot snapshot, Set tablesToLookUp) { + protected Set getSSTFileSetForSnapshot(OmSnapshot snapshot, Set tablesToLookUp) { return RdbUtil.getSSTFilesForComparison( ((RDBStore)snapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); } @@ -1069,7 +1069,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // tombstone is not loaded. 
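The fallback path just below filters each snapshot's full SST file set down to the files whose key range can intersect the bucket's table prefix. The essence of that relevance check is a string-interval overlap test, roughly as sketched here (mayContainPrefix is an invented name; the real logic lives in RocksDiffUtils.filterRelevantSstFiles):

    // Keep an SST file only if its [smallestKey, largestKey] range can
    // contain keys starting with the bucket prefix. Sketch only: keys
    // with the prefix sort between prefix and prefix + '\uffff'.
    static boolean mayContainPrefix(String smallestKey, String largestKey, String prefix) {
      return smallestKey.compareTo(prefix + '\uffff') <= 0
          && largestKey.compareTo(prefix) >= 0;
    }

Files that fail this test cannot contribute keys for the bucket, so they are dropped before any SST reading happens.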
// TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone if (skipNativeDiff || !isNativeLibsLoaded) { - Set inputFiles = filterRelevantSstFiles(getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp), + Set inputFiles = filterRelevantSstFiles(getSSTFileSetForSnapshot(fromSnapshot, tablesToLookUp), tablesToLookUp, tablePrefixes); Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); for (SstFileInfo sstFileInfo : inputFiles) { @@ -1246,9 +1246,9 @@ private Set getDiffFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, // In case of exception during inode read use all files LOG.error("Exception occurred while populating delta files for snapDiff", e); LOG.warn("Falling back to full file list comparison, inode-based optimization skipped."); - Set fromSnapshotFiles = filterRelevantSstFiles(getSSTFileListForSnapshot(fromSnapshot, + Set fromSnapshotFiles = filterRelevantSstFiles(getSSTFileSetForSnapshot(fromSnapshot, tablesToLookUp), tablesToLookUp, tablePrefixInfo); - Set toSnapshotFiles = filterRelevantSstFiles(getSSTFileListForSnapshot(toSnapshot, + Set toSnapshotFiles = filterRelevantSstFiles(getSSTFileSetForSnapshot(toSnapshot, tablesToLookUp), tablesToLookUp, tablePrefixInfo); diffFiles = new HashSet<>(); for (SstFileInfo sstFileInfo : fromSnapshotFiles) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 3976fec41871..b484ad628c72 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -1614,7 +1614,7 @@ public void testGetSnapshotDiffReportHappyCase() throws Exception { anyString()); doReturn(testDeltaFiles).when(spy) - .getSSTFileListForSnapshot(any(OmSnapshot.class), anySet()); + .getSSTFileSetForSnapshot(any(OmSnapshot.class), anySet()); doNothing().when(spy).addToObjectIdMap(eq(keyInfoTable), eq(keyInfoTable), any(), anyBoolean(), any(), any(), any(), any(), any(), any(), anyString()); From 8890252514b7a16f53eb634c2a6b794cf9a1127e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 14 Nov 2025 10:38:20 -0500 Subject: [PATCH 113/126] HDDS-13912. 
Address review comments Change-Id: Id07b78b650ce58fcc71bbac478d085b139123b02 --- .../ozone/om/snapshot/diff/delta/FullDiffComputer.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java index 76e83ba36923..e3c6c0dcae46 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java @@ -88,8 +88,8 @@ Optional>> computeDeltaFiles(SnapshotInfo from LOG.error("Exception occurred while populating delta files for snapDiff", e); LOG.warn("Falling back to full file list comparison, inode-based optimization skipped."); paths.clear(); - Set fromSnapshotFiles = getSSTFileListForSnapshot(fromSnapshot, tablesToLookup, tablePrefixInfo); - Set toSnapshotFiles = getSSTFileListForSnapshot(toSnapshot, tablesToLookup, tablePrefixInfo); + Set fromSnapshotFiles = getSSTFileSetForSnapshot(fromSnapshot, tablesToLookup, tablePrefixInfo); + Set toSnapshotFiles = getSSTFileSetForSnapshot(toSnapshot, tablesToLookup, tablePrefixInfo); for (SstFileInfo sstFileInfo : fromSnapshotFiles) { Path source = sstFileInfo.getFilePath(fromSnapshotPath); paths.put(source, Pair.of(createLink(source), sstFileInfo)); @@ -109,7 +109,7 @@ static Map getSSTFileMapForSnapshot(OmSnapshot snapshot, .getStore()).getDb().getManagedRocksDb(), tablesToLookUp), tablesToLookUp, tablePrefixInfo); } - static Set getSSTFileListForSnapshot(OmSnapshot snapshot, Set tablesToLookUp, + static Set getSSTFileSetForSnapshot(OmSnapshot snapshot, Set tablesToLookUp, TablePrefixInfo tablePrefixInfo) { return filterRelevantSstFiles(RdbUtil.getSSTFilesForComparison(((RDBStore)snapshot.getMetadataManager().getStore()) .getDb().getManagedRocksDb(), tablesToLookUp), tablesToLookUp, tablePrefixInfo); From 1067935bf3d237acb256ecb136771c165c64aa7e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 14 Nov 2025 13:33:37 -0500 Subject: [PATCH 114/126] HDDS-13912. 
Fix interface Change-Id: Iacf5dd1fd2013d6203ea7003645cb79433186519 --- .../om/snapshot/diff/delta/DeltaFileComputer.java | 3 +-- .../diff/delta/FileLinkDeltaFileComputer.java | 10 ++++++---- .../diff/delta/TestFileLinkDeltaFileComputer.java | 13 ++++++------- .../snapshot/diff/delta/TestFullDiffComputer.java | 3 +-- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java index b93dfae3c657..2cf668e7241a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/DeltaFileComputer.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.nio.file.Path; import java.util.Collection; -import java.util.Optional; import java.util.Set; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -46,6 +45,6 @@ public interface DeltaFileComputer extends Closeable { * an empty {@code Optional} if no changes are found * @throws IOException if an I/O error occurs while retrieving delta files */ - Optional>> getDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, + Collection> getDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, Set tablesToLookup) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java index 8e9994c6abd7..46a9065a3956 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java @@ -87,18 +87,20 @@ public abstract class FileLinkDeltaFileComputer implements DeltaFileComputer { * @param tablePrefixInfo information about table prefixes to apply during computation * @return an Optional containing a map where the key is the delta file path, and the value * is a pair consisting of a resolved path and the corresponding SST file information. - * If there are no delta files, returns an empty Optional. + * Return empty if the delta files could not be computed. 
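With this change, getDeltaFiles no longer passes the Optional through: an empty computation result is converted into a checked IOException at the interface boundary via orElseThrow, as the hunk below shows. The conversion idiom in isolation (unwrap and its message are placeholders, not code from the patch):

    import java.io.IOException;
    import java.util.Collection;
    import java.util.Optional;

    // Sketch: map an absent Optional onto a checked exception with a
    // lazily constructed message, mirroring the patched getDeltaFiles.
    static <T> Collection<T> unwrap(Optional<Collection<T>> result, String context)
        throws IOException {
      return result.orElseThrow(() ->
          new IOException("Failed to compute delta files for " + context));
    }

Because orElseThrow takes a Supplier, the exception and its message are only constructed when the Optional is actually empty.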
* @throws IOException if an I/O error occurs during the computation process */ abstract Optional>> computeDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException; @Override - public Optional>> getDeltaFiles(SnapshotInfo fromSnapshot, SnapshotInfo toSnapshot, - Set tablesToLookup) throws IOException { + public final Collection> getDeltaFiles(SnapshotInfo fromSnapshot, + SnapshotInfo toSnapshot, Set tablesToLookup) throws IOException { TablePrefixInfo tablePrefixInfo = activeMetadataManager.getTableBucketPrefix(fromSnapshot.getVolumeName(), fromSnapshot.getBucketName()); - return computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo).map(Map::values); + return computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, + tablePrefixInfo).map(Map::values).orElseThrow(() -> new IOException("Failed to compute delta files for " + + "snapshots " + fromSnapshot + " and " + toSnapshot)); } void updateActivity(SubStatus status) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java index 44d16d25c8dc..b53fbb957e00 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFileLinkDeltaFileComputer.java @@ -22,6 +22,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -33,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -195,6 +197,7 @@ public void testCreateLinkWhenLinkExists() throws IOException { Path expectedNextLink = deltaDirPath.resolve("2.sst"); Files.createFile(expectedNextLink); + expectedNextLink = deltaDirPath.resolve("3.sst"); // Try to create another link - it should handle the FileAlreadyExistsException Path secondLink = deltaFileComputer.createLink(sourceFile); assertEquals(expectedNextLink, secondLink); @@ -363,11 +366,10 @@ public void testGetDeltaFiles() throws IOException { deltaFileComputer.setComputeDeltaFilesResult(Optional.of(deltaMap)); - Optional>> result = + Collection> result = deltaFileComputer.getDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup); - assertTrue(result.isPresent(), "Result should be present"); - assertEquals(1, result.get().size(), "Should have one delta file"); + assertEquals(1, result.size(), "Should have one delta file"); verify(activeMetadataManager, times(1)).getTableBucketPrefix("vol1", "bucket1"); } @@ -388,10 +390,7 @@ public void testGetDeltaFilesReturnsEmpty() throws IOException { deltaFileComputer.setComputeDeltaFilesResult(Optional.empty()); - Optional>> result = - deltaFileComputer.getDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup); - - assertFalse(result.isPresent(), "Result should be empty"); + assertThrows(IOException.class, () -> deltaFileComputer.getDeltaFiles(fromSnapshot, 
toSnapshot, tablesToLookup)); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java index faab2028d97a..4a3f5bfe12f4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestFullDiffComputer.java @@ -249,8 +249,7 @@ public void testComputeDeltaFiles(String description, Collectors.toMap(entry -> snapDirectory.resolve(entry.getKey()), Map.Entry::getValue)), result); - Set iNodes = fullDiffComputer.getDeltaFiles(fromSnapshot, toSnapshot, - tablesToLookup).orElse(Collections.emptyList()).stream() + Set iNodes = fullDiffComputer.getDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup).stream() .map(Pair::getKey).map(path -> { try { return getINode(path); From b08637627a9946df01dcac51aba4949d43ab9d7f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 14 Nov 2025 13:50:16 -0500 Subject: [PATCH 115/126] HDDS-13912. Fix exception message Change-Id: I22a212c84a3dbcd66a8e61a940d4df239b73133f --- .../om/snapshot/diff/delta/FileLinkDeltaFileComputer.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java index 46a9065a3956..d01f7816ae9a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java @@ -99,8 +99,9 @@ public final Collection> getDeltaFiles(SnapshotInfo from TablePrefixInfo tablePrefixInfo = activeMetadataManager.getTableBucketPrefix(fromSnapshot.getVolumeName(), fromSnapshot.getBucketName()); return computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, - tablePrefixInfo).map(Map::values).orElseThrow(() -> new IOException("Failed to compute delta files for " + - "snapshots " + fromSnapshot + " and " + toSnapshot)); + tablePrefixInfo).map(Map::values).orElseThrow(() -> new IOException(String.format( + "Failed to compute delta files for snapshots %s and %s tablesToLookup : %s", fromSnapshot, toSnapshot, + tablesToLookup))); } void updateActivity(SubStatus status) { From 4b1cf6339fbd73f49eb54735a72494e18405bc00 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 17 Nov 2025 09:39:09 -0500 Subject: [PATCH 116/126] HDDS-13929. 
Modularise Snapshot Delta file computer (Efficient Diff) Change-Id: I4ab5c3d469b3729271ccccdf926699ec94b96e21 --- .../ozone/rocksdb/util/SstFileSetReader.java | 17 +- .../rocksdiff/RocksDBCheckpointDiffer.java | 38 +- .../ozone/rocksdiff/RocksDiffUtils.java | 2 +- .../rocksdb/util/TestSstFileSetReader.java | 23 +- .../om/snapshot/SnapshotDiffManager.java | 204 +---- .../delta/CompositeDeltaDiffComputer.java | 130 ++++ .../diff/delta/RDBDifferComputer.java | 117 +++ .../om/snapshot/TestSnapshotDiffManager.java | 327 +------- .../delta/TestCompositeDeltaDiffComputer.java | 726 ++++++++++++++++++ .../diff/delta/TestRDBDifferComputer.java | 535 +++++++++++++ 10 files changed, 1575 insertions(+), 544 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java index 4c0e1a9c5017..675f8fbd398d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java @@ -23,6 +23,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; +import java.nio.file.Path; import java.util.Collection; import java.util.Comparator; import java.util.NoSuchElementException; @@ -51,11 +52,11 @@ */ public class SstFileSetReader { - private final Collection sstFiles; + private final Collection sstFiles; private volatile long estimatedTotalKeys = -1; - public SstFileSetReader(final Collection sstFiles) { + public SstFileSetReader(final Collection sstFiles) { this.sstFiles = sstFiles; } @@ -77,9 +78,9 @@ public long getEstimatedTotalKeys() throws RocksDBException { } try (ManagedOptions options = new ManagedOptions()) { - for (String sstFile : sstFiles) { + for (Path sstFile : sstFiles) { try (ManagedSstFileReader fileReader = new ManagedSstFileReader(options)) { - fileReader.open(sstFile); + fileReader.open(sstFile.toAbsolutePath().toString()); estimatedSize += fileReader.getTableProperties().getNumEntries(); } } @@ -303,7 +304,7 @@ public int hashCode() { private abstract static class MultipleSstFileIterator> implements ClosableIterator { private final PriorityQueue> minHeap; - private MultipleSstFileIterator(Collection sstFiles) { + private MultipleSstFileIterator(Collection sstFiles) { this.minHeap = new PriorityQueue<>(); init(); initMinHeap(sstFiles); @@ -313,10 +314,10 @@ private MultipleSstFileIterator(Collection sstFiles) { protected abstract ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException, IOException; - private void initMinHeap(Collection files) { + private void initMinHeap(Collection files) { try { - for (String file : files) { - ClosableIterator iterator = getKeyIteratorForFile(file); + for (Path file : files) { + ClosableIterator iterator = 
getKeyIteratorForFile(file.toAbsolutePath().toString()); HeapEntry entry = new HeapEntry<>(iterator); if (entry.getCurrentKey() != null) { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index c9c4a6602d9a..b27dd2f9816e 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -741,26 +741,25 @@ private void preconditionChecksForLoadAllCompactionLogs() { * exist in backup directory before being involved in compactions), * and appends the extension '.sst'. */ - private String getSSTFullPath(String sstFilenameWithoutExtension, Path... dbPaths) { + private Path getSSTFullPath(SstFileInfo sstFileInfo, Path... dbPaths) throws IOException { // Try to locate the SST in the backup dir first - final Path sstPathInBackupDir = Paths.get(sstBackupDir, sstFilenameWithoutExtension + SST_FILE_EXTENSION); + final Path sstPathInBackupDir = sstFileInfo.getFilePath(Paths.get(sstBackupDir).toAbsolutePath()); if (Files.exists(sstPathInBackupDir)) { - return sstPathInBackupDir.toString(); + return sstPathInBackupDir.toAbsolutePath(); } // SST file does not exist in the SST backup dir, this means the SST file // has not gone through any compactions yet and is only available in the // src DB directory or destDB directory for (Path dbPath : dbPaths) { - final Path sstPathInDBDir = dbPath.resolve(sstFilenameWithoutExtension + SST_FILE_EXTENSION); + final Path sstPathInDBDir = sstFileInfo.getFilePath(dbPath); if (Files.exists(sstPathInDBDir)) { - return sstPathInDBDir.toString(); + return sstPathInDBDir.toAbsolutePath(); } } - // TODO: More graceful error handling? - throw new RuntimeException("Unable to locate SST file: " + sstFilenameWithoutExtension); + throw new IOException("Unable to locate SST file: " + sstFileInfo); } /** @@ -772,15 +771,13 @@ private String getSSTFullPath(String sstFilenameWithoutExtension, Path... dbPath * @param dest destination snapshot * @param versionMap version map containing the connection between source snapshot version and dest snapshot version. * @param tablesToLookup tablesToLookup set of table (column family) names used to restrict which SST files to return. - * @param sstFilesDirForSnapDiffJob dir to create hardlinks for SST files - * for snapDiff job. * @return A list of SST files without extension. * e.g. 
["/path/to/sstBackupDir/000050.sst", * "/path/to/sstBackupDir/000060.sst"] */ - public synchronized Optional> getSSTDiffListWithFullPath(DifferSnapshotInfo src, + public synchronized Optional> getSSTDiffListWithFullPath(DifferSnapshotInfo src, DifferSnapshotInfo dest, Map versionMap, TablePrefixInfo prefixInfo, - Set tablesToLookup, String sstFilesDirForSnapDiffJob) throws IOException { + Set tablesToLookup) throws IOException { int srcVersion = src.getMaxVersion(); if (!versionMap.containsKey(srcVersion)) { throw new IOException("No corresponding dest version corresponding srcVersion : " + srcVersion + " in " + @@ -792,16 +789,15 @@ public synchronized Optional> getSSTDiffListWithFullPath(DifferSnap Optional> sstDiffList = getSSTDiffList(srcSnapshotVersion, destSnapshotVersion, prefixInfo, tablesToLookup, srcVersion == 0); - - return sstDiffList.map(diffList -> diffList.stream() - .map(sst -> { - String sstFullPath = getSSTFullPath(sst.getFileName(), srcSnapshotVersion.getDbPath(), - destSnapshotVersion.getDbPath()); - Path link = sst.getFilePath(Paths.get(sstFilesDirForSnapDiffJob)); - Path srcFile = Paths.get(sstFullPath); - createLink(link, srcFile); - return link.toString(); - }).collect(Collectors.toList())); + if (sstDiffList.isPresent()) { + Map sstFileInfoMap = new HashMap<>(); + for (SstFileInfo sstFileInfo : sstDiffList.get()) { + Path sstPath = getSSTFullPath(sstFileInfo, srcSnapshotVersion.getDbPath()); + sstFileInfoMap.put(sstPath, sstFileInfo); + } + return Optional.of(sstFileInfoMap); + } + return Optional.empty(); } /** diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java index 1e4100c66679..124835918fba 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java @@ -65,7 +65,7 @@ public static Map filterRelevantSstFiles(Map /** * Filter sst files based on prefixes. 
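filterRelevantSstFiles prunes the input set in place through its iterator rather than building a new collection, as the loop below shows. The remove-while-iterating idiom it relies on, shown generically (retainMatching is an invented name):

    import java.util.Iterator;
    import java.util.Set;
    import java.util.function.Predicate;

    // Sketch of in-place filtering with an explicit Iterator; removing
    // through the iterator is the only safe structural change mid-loop.
    static <T> void retainMatching(Set<T> files, Predicate<T> relevant) {
      for (Iterator<T> it = files.iterator(); it.hasNext();) {
        if (!relevant.test(it.next())) {
          it.remove();
        }
      }
    }

Collection.removeIf(relevant.negate()) is the one-line equivalent; the explicit iterator form keeps the mutation point obvious.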
*/ - public static Set filterRelevantSstFiles(Set inputFiles, + public static Set filterRelevantSstFiles(Set inputFiles, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) { for (Iterator fileIterator = inputFiles.iterator(); fileIterator.hasNext();) { SstFileInfo sstFileInfo = fileIterator.next(); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java index 13b3e6dc5853..4cf008cadbcd 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java @@ -24,6 +24,7 @@ import static org.junit.jupiter.api.Assumptions.assumeTrue; import java.io.File; +import java.nio.file.Path; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -74,7 +75,7 @@ class TestSstFileSetReader { * @return Absolute path to the created SST file * @throws RocksDBException if there's an error during SST file creation */ - private String createRandomSSTFile(TreeMap keys) + private Path createRandomSSTFile(TreeMap keys) throws RocksDBException { File file = new File(tempDir, "tmp_sst_file" + fileCounter.incrementAndGet() + ".sst"); @@ -94,7 +95,7 @@ private String createRandomSSTFile(TreeMap keys) sstFileWriter.finish(); } assertTrue(file.exists()); - return file.getAbsolutePath(); + return file.getAbsoluteFile().toPath(); } /** @@ -121,8 +122,8 @@ private Map createKeys(int startRange, int endRange) { * @return Pair containing the complete sorted key map and list of SST file paths * @throws RocksDBException if there's an error during SST file creation */ - private Pair, List> createDummyData(int numberOfFiles) throws RocksDBException { - List files = new ArrayList<>(); + private Pair, List> createDummyData(int numberOfFiles) throws RocksDBException { + List files = new ArrayList<>(); int numberOfKeysPerFile = 1000; TreeMap keys = new TreeMap<>(createKeys(0, numberOfKeysPerFile * numberOfFiles)); @@ -136,7 +137,7 @@ private Pair, List> createDummyData(int numbe cnt += 1; } for (TreeMap fileKeys : fileKeysList) { - String tmpSSTFile = createRandomSSTFile(fileKeys); + Path tmpSSTFile = createRandomSSTFile(fileKeys); files.add(tmpSSTFile); } return Pair.of(keys, files); @@ -153,8 +154,8 @@ private Pair, List> createDummyData(int numbe @ValueSource(ints = {0, 1, 2, 3, 7, 10}) public void testGetKeyStream(int numberOfFiles) throws RocksDBException { - Pair, List> data = createDummyData(numberOfFiles); - List files = data.getRight(); + Pair, List> data = createDummyData(numberOfFiles); + List files = data.getRight(); SortedMap keys = data.getLeft(); // Getting every possible combination of 2 elements from the sampled keys. // Reading the sst file lying within the given bounds and @@ -195,9 +196,9 @@ public void testGetKeyStream(int numberOfFiles) public void testGetKeyStreamWithTombstone(int numberOfFiles) throws RocksDBException { assumeTrue(ManagedRawSSTFileReader.tryLoadLibrary()); - Pair, List> data = + Pair, List> data = createDummyData(numberOfFiles); - List files = data.getRight(); + List files = data.getRight(); SortedMap keys = data.getLeft(); // Getting every possible combination of 2 elements from the sampled keys. 
// Reading the sst file lying within the given bounds and @@ -237,7 +238,7 @@ public void testMinHeapWithOverlappingSstFiles(int numberOfFiles) throws RocksDB assumeTrue(numberOfFiles >= 2); // Create overlapping SST files with some duplicate keys - List files = new ArrayList<>(); + List files = new ArrayList<>(); Map expectedKeys = new TreeMap<>(); // File 0: keys 0-9 (all valid entries) @@ -305,7 +306,7 @@ public void testDuplicateKeyHandlingWithLatestFilePrecedence(int numberOfFiles) throws RocksDBException { assumeTrue(numberOfFiles >= 3); - List files = new ArrayList<>(); + List files = new ArrayList<>(); // All files will contain the same set of keys, but we expect the last file to "win" String[] testKeys = {KEY_PREFIX + "duplicate1", KEY_PREFIX + "duplicate2", KEY_PREFIX + "duplicate3"}; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 219fc01f0a56..8271dd6315b2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om.snapshot; -import static java.util.stream.Collectors.toMap; import static org.apache.commons.lang3.StringUtils.leftPad; import static org.apache.hadoop.hdds.StringUtils.getLexicographicallyHigherString; import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.CREATE; @@ -59,9 +58,6 @@ import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.DIFF_REPORT_GEN; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.OBJECT_ID_MAP_GEN_FSO; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.OBJECT_ID_MAP_GEN_OBS; -import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_DAG_WALK; -import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_FULL_DIFF; -import static org.apache.ozone.rocksdiff.RocksDiffUtils.filterRelevantSstFiles; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Maps; @@ -76,15 +72,14 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.NavigableMap; import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.Callable; @@ -94,6 +89,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.commons.io.file.PathUtils; import org.apache.commons.lang3.tuple.Pair; @@ -112,8 +109,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotLocalData; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -122,18 +117,18 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; -import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.om.snapshot.diff.delta.CompositeDeltaDiffComputer; +import org.apache.hadoop.ozone.om.snapshot.diff.delta.DeltaFileComputer; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.ListSnapshotDiffJobResponse; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; import org.apache.hadoop.ozone.util.ClosableIterator; import org.apache.logging.log4j.util.Strings; -import org.apache.ozone.rocksdb.util.RdbUtil; import org.apache.ozone.rocksdb.util.SstFileInfo; import org.apache.ozone.rocksdb.util.SstFileSetReader; -import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.ColumnFamilyDescriptor; @@ -358,39 +353,6 @@ private void deleteDir(Path path) { } } - /** - * Convert from SnapshotInfo to DifferSnapshotInfo. - */ - private static DifferSnapshotInfo getDSIFromSI(OMMetadataManager activeOmMetadataManager, - SnapshotInfo snapshotInfo, OmSnapshotLocalData snapshotLocalData) throws IOException { - final UUID snapshotId = snapshotInfo.getSnapshotId(); - final long dbTxSequenceNumber = snapshotInfo.getDbTxSequenceNumber(); - NavigableMap> versionSstFiles = snapshotLocalData.getVersionSstFileInfos() - .entrySet().stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getSstFiles(), - (u, v) -> { - throw new IllegalStateException(String.format("Duplicate key %s", u)); - }, TreeMap::new)); - if (versionSstFiles.isEmpty()) { - throw new IOException(String.format("No versions found corresponding to %s", snapshotId)); - } - return new DifferSnapshotInfo( - version -> OmSnapshotManager.getSnapshotPath(activeOmMetadataManager, snapshotId, version), - snapshotId, dbTxSequenceNumber, versionSstFiles); - } - - @VisibleForTesting - protected Set getSSTFileSetForSnapshot(OmSnapshot snapshot, Set tablesToLookUp) { - return RdbUtil.getSSTFilesForComparison( - ((RDBStore)snapshot.getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); - } - - @VisibleForTesting - protected Map getSSTFileMapForSnapshot(OmSnapshot snapshot, - Set tablesToLookUp) throws IOException { - return RdbUtil.getSSTFilesWithInodesForComparison(((RDBStore)snapshot - .getMetadataManager().getStore()).getDb().getManagedRocksDb(), tablesToLookUp); - } - /** * Gets the report key for a particular index of snapshot diff job. */ @@ -842,17 +804,21 @@ void generateSnapshotDiffReport(final String jobKey, // hardlinks. JobId is used as dir name for uniqueness. // It is required to prevent that SST files get deleted for in_progress // job by RocksDBCheckpointDiffer#pruneOlderSnapshotsWithCompactionHistory. 
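The comment above relies on hard links; a minimal sketch of the idea using java.nio.file, where sstBackupDir, jobId, and deltaSstFiles are hypothetical stand-ins rather than names from this patch:

    // Hard-link each delta SST under a directory named after the job id;
    // pruning the originals then cannot free data the diff job still needs.
    Path jobDir = Files.createDirectories(sstBackupDir.resolve(jobId));
    for (Path source : deltaSstFiles) {
      Path link = jobDir.resolve(source.getFileName());
      if (!Files.exists(link)) {
        Files.createLink(link, source); // hard link, not a copy
      }
    }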
- Path path = Paths.get(sstBackupDirForSnapDiffJobs + "/" + jobId); + Path diffJobPath = Paths.get(sstBackupDirForSnapDiffJobs).resolve(jobId); UncheckedAutoCloseableSupplier rcFromSnapshot = null; UncheckedAutoCloseableSupplier rcToSnapshot = null; - try { + boolean useFullDiff = snapshotForceFullDiff || forceFullDiff; + boolean performNonNativeDiff = diffDisableNativeLibs || disableNativeDiff || !isNativeLibsLoaded; + + Consumer activityReporter = (jobStatus) -> recordActivity(jobKey, jobStatus); + try (DeltaFileComputer deltaFileComputer = new CompositeDeltaDiffComputer(ozoneManager.getOmSnapshotManager(), + activeOmMetadataManager, diffJobPath, activityReporter, useFullDiff, performNonNativeDiff)) { if (!areDiffJobAndSnapshotsActive(volumeName, bucketName, fromSnapshotName, toSnapshotName)) { return; } - rcFromSnapshot = ozoneManager.getOmSnapshotManager() .getActiveSnapshot(volumeName, bucketName, fromSnapshotName); @@ -866,8 +832,6 @@ void generateSnapshotDiffReport(final String jobKey, volumeName, bucketName, fromSnapshotName); SnapshotInfo tsInfo = getSnapshotInfo(ozoneManager, volumeName, bucketName, toSnapshotName); - - Files.createDirectories(path); // JobId is prepended to column families name to make them unique // for request. fromSnapshotColumnFamily = @@ -901,9 +865,6 @@ void generateSnapshotDiffReport(final String jobKey, fromSnapshot.getMetadataManager()); TablePrefixInfo tablePrefixes = toSnapshot.getMetadataManager().getTableBucketPrefix(volumeName, bucketName); - boolean useFullDiff = snapshotForceFullDiff || forceFullDiff; - boolean performNonNativeDiff = diffDisableNativeLibs || disableNativeDiff; - if (!areDiffJobAndSnapshotsActive(volumeName, bucketName, fromSnapshotName, toSnapshotName)) { return; @@ -945,22 +906,20 @@ void generateSnapshotDiffReport(final String jobKey, () -> { recordActivity(jobKey, OBJECT_ID_MAP_GEN_OBS); getDeltaFilesAndDiffKeysToObjectIdToKeyMap(fsKeyTable, tsKeyTable, - fromSnapshot, toSnapshot, fsInfo, tsInfo, useFullDiff, - performNonNativeDiff, tablePrefixes, + fsInfo, tsInfo, performNonNativeDiff, tablePrefixes, objectIdToKeyNameMapForFromSnapshot, objectIdToKeyNameMapForToSnapshot, objectIdToIsDirMap, - oldParentIds, newParentIds, path.toString(), jobKey); + oldParentIds, newParentIds, deltaFileComputer, jobKey); return null; }, () -> { if (bucketLayout.isFileSystemOptimized()) { recordActivity(jobKey, OBJECT_ID_MAP_GEN_FSO); getDeltaFilesAndDiffKeysToObjectIdToKeyMap(fsDirTable, tsDirTable, - fromSnapshot, toSnapshot, fsInfo, tsInfo, useFullDiff, - performNonNativeDiff, tablePrefixes, + fsInfo, tsInfo, performNonNativeDiff, tablePrefixes, objectIdToKeyNameMapForFromSnapshot, objectIdToKeyNameMapForToSnapshot, objectIdToIsDirMap, - oldParentIds, newParentIds, path.toString(), jobKey); + oldParentIds, newParentIds, deltaFileComputer, jobKey); } return null; }, @@ -1034,8 +993,6 @@ void generateSnapshotDiffReport(final String jobKey, dropAndCloseColumnFamilyHandle(fromSnapshotColumnFamily); dropAndCloseColumnFamilyHandle(toSnapshotColumnFamily); dropAndCloseColumnFamilyHandle(objectIDsColumnFamily); - // Delete SST files backup directory. 
- deleteDir(path); // Decrement ref counts if (rcFromSnapshot != null) { rcFromSnapshot.close(); @@ -1050,38 +1007,22 @@ void generateSnapshotDiffReport(final String jobKey, private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( final Table fsTable, final Table tsTable, - final OmSnapshot fromSnapshot, final OmSnapshot toSnapshot, final SnapshotInfo fsInfo, final SnapshotInfo tsInfo, - final boolean useFullDiff, final boolean skipNativeDiff, - final TablePrefixInfo tablePrefixes, + boolean skipNativeDiff, final TablePrefixInfo tablePrefixes, final PersistentMap oldObjIdToKeyMap, final PersistentMap newObjIdToKeyMap, final PersistentMap objectIdToIsDirMap, - final Optional> oldParentIds, - final Optional> newParentIds, - final String diffDir, final String jobKey) throws IOException, RocksDBException { + final Optional> oldParentIds, final Optional> newParentIds, + final DeltaFileComputer deltaFileComputer, final String jobKey) throws IOException, RocksDBException { Set tablesToLookUp = Collections.singleton(fsTable.getName()); - Set deltaFiles = getDeltaFiles(fromSnapshot, toSnapshot, - tablesToLookUp, fsInfo, tsInfo, useFullDiff, tablePrefixes, diffDir, jobKey); - - // Workaround to handle deletes if native rocksDb tool for reading - // tombstone is not loaded. - // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone - if (skipNativeDiff || !isNativeLibsLoaded) { - Set inputFiles = filterRelevantSstFiles(getSSTFileSetForSnapshot(fromSnapshot, tablesToLookUp), - tablesToLookUp, tablePrefixes); - Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); - for (SstFileInfo sstFileInfo : inputFiles) { - deltaFiles.add(sstFileInfo.getFilePath(fromSnapshotPath).toAbsolutePath().toString()); - } - } + Collection> deltaFiles = deltaFileComputer.getDeltaFiles(fsInfo, tsInfo, + tablesToLookUp); if (LOG.isDebugEnabled()) { LOG.debug("Computed Delta SST File Set, Total count = {} ", deltaFiles.size()); } - addToObjectIdMap(fsTable, tsTable, deltaFiles, - !skipNativeDiff && isNativeLibsLoaded, - oldObjIdToKeyMap, newObjIdToKeyMap, objectIdToIsDirMap, oldParentIds, + addToObjectIdMap(fsTable, tsTable, deltaFiles.stream().map(Pair::getLeft).collect(Collectors.toList()), + !skipNativeDiff, oldObjIdToKeyMap, newObjIdToKeyMap, objectIdToIsDirMap, oldParentIds, newParentIds, tablePrefixes, jobKey); } @@ -1089,7 +1030,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( @SuppressWarnings("checkstyle:ParameterNumber") void addToObjectIdMap(Table fsTable, Table tsTable, - Set deltaFiles, boolean nativeRocksToolsLoaded, + Collection deltaFiles, boolean nativeRocksToolsLoaded, PersistentMap oldObjIdToKeyMap, PersistentMap newObjIdToKeyMap, PersistentMap objectIdToIsDirMap, @@ -1168,99 +1109,6 @@ void addToObjectIdMap(Table fsTable, } } - @VisibleForTesting - @SuppressWarnings("checkstyle:ParameterNumber") - Set getDeltaFiles(OmSnapshot fromSnapshot, - OmSnapshot toSnapshot, - Set tablesToLookUp, - SnapshotInfo fsInfo, - SnapshotInfo tsInfo, - boolean useFullDiff, - TablePrefixInfo tablePrefixInfo, - String diffDir, String jobKey) - throws IOException { - // TODO: [SNAPSHOT] Refactor the parameter list - Optional> deltaFiles = Optional.empty(); - // Check if compaction DAG is available, use that if so - if (differ != null && fsInfo != null && tsInfo != null && !useFullDiff) { - try (ReadableOmSnapshotLocalDataProvider snapLocalDataProvider = snapshotLocalDataManager.getOmSnapshotLocalData( - toSnapshot.getSnapshotID(), 
fromSnapshot.getSnapshotID())) { - OmSnapshotLocalData toSnapshotLocalData = snapLocalDataProvider.getSnapshotLocalData(); - OmSnapshotLocalData fromSnapshotLocalData = snapLocalDataProvider.getPreviousSnapshotLocalData(); - // Construct DifferSnapshotInfo - final DifferSnapshotInfo fromDSI = getDSIFromSI(activeOmMetadataManager, fsInfo, fromSnapshotLocalData); - final DifferSnapshotInfo toDSI = getDSIFromSI(activeOmMetadataManager, tsInfo, toSnapshotLocalData); - - recordActivity(jobKey, SST_FILE_DELTA_DAG_WALK); - LOG.debug("Calling RocksDBCheckpointDiffer"); - final Map versionMap = toSnapshotLocalData.getVersionSstFileInfos().entrySet() - .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getPreviousSnapshotVersion())); - deltaFiles = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, versionMap, tablePrefixInfo, tablesToLookUp, - diffDir).map(HashSet::new); - } catch (Exception exception) { - recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF); - LOG.warn("Failed to get SST diff file using RocksDBCheckpointDiffer. " + - "It will fallback to full diff now.", exception); - } - } - - if (useFullDiff || !deltaFiles.isPresent()) { - // If compaction DAG is not available (already cleaned up), fall back to - // the slower approach. - if (!useFullDiff) { - LOG.warn("RocksDBCheckpointDiffer is not available, falling back to" + - " slow path"); - } - recordActivity(jobKey, SST_FILE_DELTA_FULL_DIFF); - Set diffFiles = getDiffFiles(fromSnapshot, toSnapshot, tablesToLookUp, tablePrefixInfo); - deltaFiles = Optional.of(diffFiles); - } - - return deltaFiles.orElseThrow(() -> - new IOException("Error getting diff files b/w " + fromSnapshot.getSnapshotTableKey() + " and " + - toSnapshot.getSnapshotTableKey())); - } - - private Set getDiffFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, Set tablesToLookUp, - TablePrefixInfo tablePrefixInfo) { - Set diffFiles; - Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); - Path toSnapshotPath = toSnapshot.getMetadataManager().getStore().getDbLocation().getAbsoluteFile().toPath(); - try { - diffFiles = new HashSet<>(); - Map fromSnapshotFiles = filterRelevantSstFiles(getSSTFileMapForSnapshot(fromSnapshot, - tablesToLookUp), tablesToLookUp, tablePrefixInfo); - Map toSnapshotFiles = filterRelevantSstFiles(getSSTFileMapForSnapshot(toSnapshot, - tablesToLookUp), tablesToLookUp, tablePrefixInfo); - for (Map.Entry entry : fromSnapshotFiles.entrySet()) { - if (!toSnapshotFiles.containsKey(entry.getKey())) { - diffFiles.add(entry.getValue().getFilePath(fromSnapshotPath).toAbsolutePath().toString()); - } - } - for (Map.Entry entry : toSnapshotFiles.entrySet()) { - if (!fromSnapshotFiles.containsKey(entry.getKey())) { - diffFiles.add(entry.getValue().getFilePath(toSnapshotPath).toAbsolutePath().toString()); - } - } - } catch (IOException e) { - // In case of exception during inode read use all files - LOG.error("Exception occurred while populating delta files for snapDiff", e); - LOG.warn("Falling back to full file list comparison, inode-based optimization skipped."); - Set fromSnapshotFiles = filterRelevantSstFiles(getSSTFileSetForSnapshot(fromSnapshot, - tablesToLookUp), tablesToLookUp, tablePrefixInfo); - Set toSnapshotFiles = filterRelevantSstFiles(getSSTFileSetForSnapshot(toSnapshot, - tablesToLookUp), tablesToLookUp, tablePrefixInfo); - diffFiles = new HashSet<>(); - for (SstFileInfo sstFileInfo : fromSnapshotFiles) { - 
diffFiles.add(sstFileInfo.getFilePath(fromSnapshotPath).toAbsolutePath().toString()); - } - for (SstFileInfo sstFileInfo : toSnapshotFiles) { - diffFiles.add(sstFileInfo.getFilePath(toSnapshotPath).toAbsolutePath().toString()); - } - } - return diffFiles; - } - private void validateEstimatedKeyChangesAreInLimits( SstFileSetReader sstFileReader ) throws RocksDBException, IOException { @@ -1599,7 +1447,7 @@ private synchronized void updateJobStatus(String jobKey, } synchronized void recordActivity(String jobKey, - SnapshotDiffResponse.SubStatus subStatus) { + SubStatus subStatus) { SnapshotDiffJob snapshotDiffJob = snapDiffJobTable.get(jobKey); snapshotDiffJob.setSubStatus(subStatus); snapDiffJobTable.put(jobKey, snapshotDiffJob); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java new file mode 100644 index 000000000000..5c109bd0f996 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static org.apache.hadoop.ozone.om.snapshot.diff.delta.FullDiffComputer.getSSTFileSetForSnapshot; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * CompositeDeltaDiffComputer is responsible for computing the delta file + * differences between two snapshots, utilizing different strategies such + * as partial differ computation and full differ computation. + * + * It serves as an orchestrator to decide whether to perform a full diff + * or a more efficient partial diff, and handles fallback mechanisms if + * the chosen method fails. 
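+ * + * Rough flow: attempt the DAG-based partial diff first; on any failure or + * empty result, recompute with the exhaustive full diff.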
+ * + * The class leverages two main difference computation strategies: + * - {@code RDBDifferComputer} for partial diff computation + * - {@code FullDiffComputer} for exhaustive diff + * + * This class also includes support for handling non-native diff scenarios + * through additional processing of input files from the "from" snapshot + * when native RocksDB tools are not used. + * + * Inherits from {@code FileLinkDeltaFileComputer} and implements the + * functionality for computing delta files and resource management. + */ +public class CompositeDeltaDiffComputer extends FileLinkDeltaFileComputer { + + private static final Logger LOG = LoggerFactory.getLogger(CompositeDeltaDiffComputer.class); + + private final RDBDifferComputer differComputer; + private final FullDiffComputer fullDiffComputer; + private final boolean nonNativeDiff; + + public CompositeDeltaDiffComputer(OmSnapshotManager snapshotManager, + OMMetadataManager activeMetadataManager, Path deltaDirPath, + Consumer activityReporter, boolean fullDiff, + boolean nonNativeDiff) throws IOException { + super(snapshotManager, activeMetadataManager, deltaDirPath, activityReporter); + differComputer = fullDiff ? null : new RDBDifferComputer(snapshotManager, activeMetadataManager, + deltaDirPath.resolve("rdbDiffer"), activityReporter); + fullDiffComputer = new FullDiffComputer(snapshotManager, activeMetadataManager, + deltaDirPath.resolve("fullDiff"), activityReporter); + this.nonNativeDiff = nonNativeDiff; + } + + @Override + Optional>> computeDeltaFiles(SnapshotInfo fromSnapshotInfo, + SnapshotInfo toSnapshotInfo, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException { + Map> deltaFiles = null; + try { + if (differComputer != null) { + updateActivity(SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_DAG_WALK); + deltaFiles = differComputer.computeDeltaFiles(fromSnapshotInfo, toSnapshotInfo, tablesToLookup, + tablePrefixInfo).orElse(null); + } + } catch (Exception e) { + LOG.error("Partial diff computation failed. Falling back to full diff.", e); + } + if (deltaFiles == null) { + updateActivity(SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_FULL_DIFF); + deltaFiles = fullDiffComputer.computeDeltaFiles(fromSnapshotInfo, toSnapshotInfo, tablesToLookup, + tablePrefixInfo).orElse(null); + if (deltaFiles == null) { + // FileLinkDeltaFileComputer would throw an exception in this case. + return Optional.empty(); + } + } + // Workaround to handle deletes if the native RocksDB tool for reading + // tombstones is not loaded. + // When performing a non-native diff, the "from" snapshot's input files need to be added.
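+ // (A delete is only observable here by also reading the "from" snapshot's + // SSTs and noticing that a key has no counterpart on the "to" side.)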
+ if (nonNativeDiff) { + try (UncheckedAutoCloseableSupplier fromSnapshot = getSnapshot(fromSnapshotInfo)) { + Set fromSnapshotFiles = getSSTFileSetForSnapshot(fromSnapshot.get(), tablesToLookup, + tablePrefixInfo); + Path fromSnapshotPath = fromSnapshot.get().getMetadataManager().getStore().getDbLocation() + .getAbsoluteFile().toPath(); + for (SstFileInfo sstFileInfo : fromSnapshotFiles) { + Path source = sstFileInfo.getFilePath(fromSnapshotPath); + deltaFiles.put(source, Pair.of(createLink(source), sstFileInfo)); + } + } + } + return Optional.of(deltaFiles); + } + + @Override + public void close() throws IOException { + if (differComputer != null) { + differComputer.close(); + } + if (fullDiffComputer != null) { + fullDiffComputer.close(); + } + super.close(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java new file mode 100644 index 000000000000..0a59029fb0f4 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static java.util.stream.Collectors.toMap; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Optional; +import java.util.Set; +import java.util.TreeMap; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ozone.rocksdiff.DifferSnapshotInfo; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; + +/** + * Computes RocksDB SST file differences between two snapshots and materializes + * differing SST files as hard links in the configured delta directory. + * + *
This class uses {@link RocksDBCheckpointDiffer} to obtain the list of SST + * files that differ between a "from" and a "to" snapshot. It opens local + * snapshot metadata via {@link #getLocalDataProvider}, and delegates the + * comparison to the differ to compute the delta files. + * + * Each source SST file returned by the differ is linked into the delta + * directory using {@link FileLinkDeltaFileComputer#createLink(Path)}, and the + * returned value from {@link #computeDeltaFiles} is a map pairing each source + * path with the created link and its {@link SstFileInfo}. The implementation + * synchronizes on the internal {@code differ} instance because the differ is + * not assumed to be thread-safe.
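+ * + * A typical call (hypothetical values): {@code computeDeltaFiles(fromInfo, + * toInfo, Collections.singleton("keyTable"), prefixInfo)} yields the linked + * delta SSTs, or {@code Optional.empty()} when no differ is available.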
+ */ +class RDBDifferComputer extends FileLinkDeltaFileComputer { + + private final RocksDBCheckpointDiffer differ; + + RDBDifferComputer(OmSnapshotManager omSnapshotManager, OMMetadataManager activeMetadataManager, + Path deltaDirPath, Consumer activityReporter) throws IOException { + super(omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter); + this.differ = activeMetadataManager.getStore().getRocksDBCheckpointDiffer(); + } + + @Override + public Optional>> computeDeltaFiles(SnapshotInfo fromSnapshot, + SnapshotInfo toSnapshot, Set tablesToLookup, TablePrefixInfo tablePrefixInfo) throws IOException { + if (differ != null) { + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider snapProvider = + getLocalDataProvider(toSnapshot.getSnapshotId(), fromSnapshot.getSnapshotId())) { + final DifferSnapshotInfo fromDSI = getDSIFromSI(getActiveMetadataManager(), fromSnapshot, + snapProvider.getPreviousSnapshotLocalData()); + final DifferSnapshotInfo toDSI = getDSIFromSI(getActiveMetadataManager(), toSnapshot, + snapProvider.getSnapshotLocalData()); + final Map versionMap = snapProvider.getSnapshotLocalData().getVersionSstFileInfos().entrySet() + .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getPreviousSnapshotVersion())); + synchronized (differ) { + Optional> paths = differ.getSSTDiffListWithFullPath(toDSI, fromDSI, versionMap, + tablePrefixInfo, tablesToLookup); + if (paths.isPresent()) { + Map> links = new HashMap<>(paths.get().size()); + for (Map.Entry source : paths.get().entrySet()) { + links.put(source.getKey(), Pair.of(createLink(source.getKey()), source.getValue())); + } + return Optional.of(links); + } + } + } + } + return Optional.empty(); + } + + /** + * Convert from SnapshotInfo to DifferSnapshotInfo. 
+ */ + private static DifferSnapshotInfo getDSIFromSI(OMMetadataManager activeOmMetadataManager, + SnapshotInfo snapshotInfo, OmSnapshotLocalData snapshotLocalData) throws IOException { + final UUID snapshotId = snapshotInfo.getSnapshotId(); + final long dbTxSequenceNumber = snapshotInfo.getDbTxSequenceNumber(); + NavigableMap> versionSstFiles = snapshotLocalData.getVersionSstFileInfos().entrySet() + .stream().collect(toMap(Map.Entry::getKey, + entry -> entry.getValue().getSstFiles(), (u, v) -> { + throw new IllegalStateException(String.format("Duplicate key %s", u)); + }, TreeMap::new)); + if (versionSstFiles.isEmpty()) { + throw new IOException(String.format("No versions found corresponding to %s", snapshotId)); + } + return new DifferSnapshotInfo( + version -> OmSnapshotManager.getSnapshotPath(activeOmMetadataManager, snapshotId, version), + snapshotId, dbTxSequenceNumber, versionSstFiles); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index b484ad628c72..c50d32e5b9b2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -49,7 +49,6 @@ import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.QUEUED; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.REJECTED; -import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION; import static org.apache.ratis.util.JavaUtils.attempt; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -59,17 +58,11 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; -import static org.mockito.Mockito.anyDouble; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.anyMap; -import static org.mockito.Mockito.anySet; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.atLeast; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockConstruction; @@ -80,17 +73,15 @@ import com.google.common.cache.CacheLoader; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import com.google.common.collect.Sets; import jakarta.annotation.Nonnull; import java.io.File; import java.io.IOException; -import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -105,7 +96,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; import 
java.util.stream.LongStream; @@ -129,7 +119,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -140,7 +129,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock; -import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.om.snapshot.SnapshotTestUtils.StubbedPersistentMap; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage; @@ -151,17 +139,11 @@ import org.apache.hadoop.ozone.util.ClosableIterator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ExitUtil; -import org.apache.ozone.rocksdb.util.RdbUtil; -import org.apache.ozone.rocksdb.util.SstFileInfo; import org.apache.ozone.rocksdb.util.SstFileSetReader; -import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; -import org.apache.ozone.rocksdiff.RocksDiffUtils; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -171,15 +153,12 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; import org.mockito.Mock; import org.mockito.MockedConstruction; import org.mockito.MockedStatic; -import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import org.mockito.stubbing.Answer; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -424,230 +403,6 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { return snapInfo; } - @ParameterizedTest - @ValueSource(ints = {0, 1, 2, 5, 10, 100, 1000, 10000}) - public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { - UUID snap1 = UUID.randomUUID(); - UUID snap2 = UUID.randomUUID(); - when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) - .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); - when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) - .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); - - String diffDir = snapDiffDir.getAbsolutePath(); - String diffJobKey = snap1 + DELIMITER + snap2; - Set randomStrings = IntStream.range(0, numberOfFiles) - .mapToObj(i -> RandomStringUtils.secure().nextAlphabetic(10)) - .collect(Collectors.toSet()); - - when(differ.getSSTDiffListWithFullPath( - any(DifferSnapshotInfo.class), - any(DifferSnapshotInfo.class), - anyMap(), - 
any(TablePrefixInfo.class), - anySet(), - eq(diffDir)) - ).thenReturn(Optional.of(Lists.newArrayList(randomStrings))); - mockSnapshotLocalData(); - UncheckedAutoCloseableSupplier rcFromSnapshot = - omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); - UncheckedAutoCloseableSupplier rcToSnapshot = - omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); - OmSnapshot fromSnapshot = rcFromSnapshot.get(); - OmSnapshot toSnapshot = rcToSnapshot.get(); - - SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); - SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); - when(jobTableIterator.isValid()).thenReturn(false); - - SnapshotDiffManager spy = spy(snapshotDiffManager); - doNothing().when(spy).recordActivity(any(), any()); - doNothing().when(spy).updateProgress(anyString(), anyDouble()); - Set deltaFiles = spy.getDeltaFiles( - fromSnapshot, - toSnapshot, - Sets.newHashSet("cf1", "cf2"), fromSnapshotInfo, - toSnapshotInfo, false, - new TablePrefixInfo(Collections.emptyMap()), diffDir, diffJobKey); - assertEquals(randomStrings, deltaFiles); - - rcFromSnapshot.close(); - rcToSnapshot.close(); - } - - private void mockSnapshotLocalData() throws IOException { - OmSnapshotLocalData localData = mock(OmSnapshotLocalData.class); - ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); - when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(localData); - when(snapProvider.getSnapshotLocalData()).thenReturn(localData); - OmSnapshotLocalData.VersionMeta versionMeta = mock(OmSnapshotLocalData.VersionMeta.class); - when(versionMeta.getSstFiles()).thenReturn(Collections.emptyList()); - when(localData.getVersionSstFileInfos()).thenReturn(ImmutableMap.of(0, versionMeta)); - when(localDataManager.getOmSnapshotLocalData(any(UUID.class), any(UUID.class))).thenReturn(snapProvider); - } - - @ParameterizedTest - @CsvSource({"0,true", "1,true", "2,true", "5,true", "10,true", "100,true", - "1000,true", "10000,true", "0,false", "1,false", "2,false", "5,false", - "10,false", "100,false", "1000,false", "10000,false"}) - public void testGetDeltaFilesWithFullDiff(int numberOfFiles, - boolean useFullDiff) - throws IOException { - try (MockedStatic mockedRdbUtil = mockStatic(RdbUtil.class); - MockedStatic mockedRocksDiffUtils = - mockStatic(RocksDiffUtils.class)) { - Set deltaStrings = new HashSet<>(); - - mockedRdbUtil.when( - () -> RdbUtil.getSSTFilesWithInodesForComparison(any(), anySet())) - .thenAnswer(invocation -> { - Map retVal = IntStream.range(0, numberOfFiles) - .mapToObj(i -> RandomStringUtils.secure().nextAlphabetic(10)) - .collect(Collectors.toMap(Function.identity(), - i -> new SstFileInfo(i, null, null, null))); - deltaStrings.addAll(retVal.keySet().stream().map(Object::toString).collect(Collectors.toSet())); - return retVal; - }); - - mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anyMap(), anySet(), any())) - .thenAnswer(invocationOnMock -> { - invocationOnMock.getArgument(0, Map.class).entrySet().stream() - .findAny().ifPresent(val -> { - Map.Entry entry = (Map.Entry) val; - assertTrue(deltaStrings.contains(entry.getKey())); - invocationOnMock.getArgument(0, Map.class).remove(entry.getKey()); - deltaStrings.remove(entry.getKey()); - }); - return invocationOnMock.getArgument(0, Map.class); - }); - UUID snap1 = UUID.randomUUID(); - UUID snap2 = UUID.randomUUID(); - String diffJobKey = snap1 + DELIMITER + snap2; - 
when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) - .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); - when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) - .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); - if (!useFullDiff) { - when(differ.getSSTDiffListWithFullPath( - any(DifferSnapshotInfo.class), - any(DifferSnapshotInfo.class), - anyMap(), - any(TablePrefixInfo.class), - anySet(), - anyString())) - .thenReturn(Optional.empty()); - } - mockSnapshotLocalData(); - UncheckedAutoCloseableSupplier rcFromSnapshot = - omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); - UncheckedAutoCloseableSupplier rcToSnapshot = - omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); - OmSnapshot fromSnapshot = rcFromSnapshot.get(); - OmSnapshot toSnapshot = rcToSnapshot.get(); - - SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); - SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); - when(jobTableIterator.isValid()).thenReturn(false); - SnapshotDiffManager spy = spy(snapshotDiffManager); - doNothing().when(spy).recordActivity(any(), any()); - doNothing().when(spy).updateProgress(anyString(), anyDouble()); - Set deltaFiles = spy.getDeltaFiles( - fromSnapshot, - toSnapshot, - Sets.newHashSet("cf1", "cf2"), - fromSnapshotInfo, - toSnapshotInfo, - false, - new TablePrefixInfo(Collections.emptyMap()), - snapDiffDir.getAbsolutePath(), diffJobKey); - assertEquals(deltaStrings.stream() - .map(i -> dbStore.getDbLocation().toPath().resolve(i + SST_FILE_EXTENSION).toAbsolutePath().toString()) - .collect(Collectors.toSet()), deltaFiles); - if (useFullDiff && numberOfFiles > 1) { - assertThat(deltaFiles).isNotEmpty(); - } - } - } - - @ParameterizedTest - @ValueSource(ints = {0, 1, 2, 5, 10, 100, 1000, 10000}) - public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) - throws IOException { - try (MockedStatic mockedRdbUtil = mockStatic(RdbUtil.class); - MockedStatic mockedRocksDiffUtils = - mockStatic(RocksDiffUtils.class)) { - Set deltaStrings = new HashSet<>(); - - mockedRdbUtil.when( - () -> RdbUtil.getSSTFilesForComparison(any(), anySet())) - .thenAnswer((Answer>) invocation -> { - Set retVal = IntStream.range(0, numberOfFiles) - .mapToObj(i -> RandomStringUtils.secure().nextAlphabetic(10)) - .collect(Collectors.toSet()); - deltaStrings.addAll(retVal); - return retVal; - }); - - mockedRocksDiffUtils.when(() -> - RocksDiffUtils.filterRelevantSstFiles(anySet(), anySet(), any())) - .thenAnswer((Answer) invocationOnMock -> { - invocationOnMock.getArgument(0, Set.class).stream() - .findAny().ifPresent(val -> { - assertTrue(deltaStrings.contains(val)); - invocationOnMock.getArgument(0, Set.class).remove(val); - deltaStrings.remove(val); - }); - return null; - }); - UUID snap1 = UUID.randomUUID(); - UUID snap2 = UUID.randomUUID(); - when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) - .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap1)); - when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) - .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); - - doThrow(new RuntimeException("File not found exception.")) - .when(differ) - .getSSTDiffListWithFullPath( - any(DifferSnapshotInfo.class), - 
any(DifferSnapshotInfo.class), - anyMap(), - any(TablePrefixInfo.class), - anySet(), - anyString()); - - UncheckedAutoCloseableSupplier rcFromSnapshot = - omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); - UncheckedAutoCloseableSupplier rcToSnapshot = - omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); - OmSnapshot fromSnapshot = rcFromSnapshot.get(); - OmSnapshot toSnapshot = rcToSnapshot.get(); - - SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); - SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); - when(jobTableIterator.isValid()).thenReturn(false); - String diffJobKey = snap1 + DELIMITER + snap2; - SnapshotDiffManager spy = spy(snapshotDiffManager); - doNothing().when(spy).recordActivity(any(), any()); - doNothing().when(spy).updateProgress(anyString(), anyDouble()); - mockSnapshotLocalData(); - Set deltaFiles = spy.getDeltaFiles( - fromSnapshot, - toSnapshot, - Sets.newHashSet("cf1", "cf2"), - fromSnapshotInfo, - toSnapshotInfo, - false, - new TablePrefixInfo(Collections.emptyMap()), - snapDiffDir.getAbsolutePath(), diffJobKey); - assertEquals(deltaStrings, deltaFiles); - - rcFromSnapshot.close(); - rcToSnapshot.close(); - } - } - private Table getMockedTable( Map map, String tableName) throws IOException { @@ -746,7 +501,7 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, Set newParentIds = Sets.newHashSet(); spy.addToObjectIdMap(toSnapshotTable, - fromSnapshotTable, Sets.newHashSet("dummy.sst"), + fromSnapshotTable, Sets.newHashSet(Paths.get("dummy.sst")), nativeLibraryLoaded, oldObjectIdKeyMap, newObjectIdKeyMap, objectIdsToCheck, Optional.of(oldParentIds), Optional.of(newParentIds), @@ -1562,84 +1317,6 @@ private void setupMocksForRunningASnapDiff( when(bucketInfoTable.get(bucketKey)).thenReturn(bucketInfo); } - @Test - public void testGetDeltaFilesWithFullDiff() throws IOException { - SnapshotDiffManager spy = spy(snapshotDiffManager); - UUID snap1 = UUID.randomUUID(); - OmSnapshot fromSnapshot = getMockedOmSnapshot(snap1); - Path fromSnapshotPath = fromSnapshot.getMetadataManager().getStore().getDbLocation().toPath(); - UUID snap2 = UUID.randomUUID(); - OmSnapshot toSnapshot = getMockedOmSnapshot(snap2); - Path toSnapshotPath = toSnapshot.getMetadataManager().getStore().getDbLocation().toPath(); - Mockito.doAnswer(invocation -> { - OmSnapshot snapshot = invocation.getArgument(0); - if (snapshot == fromSnapshot) { - Map inodeToFileMap = new HashMap<>(); - inodeToFileMap.put(1, new SstFileInfo("1", null, null, null)); - inodeToFileMap.put(2, new SstFileInfo("2", null, null, null)); - inodeToFileMap.put(3, new SstFileInfo("3", null, null, null)); - return inodeToFileMap; - } - if (snapshot == toSnapshot) { - Map inodeToFileMap = new HashMap<>(); - inodeToFileMap.put(1, new SstFileInfo("10", null, null, null)); - inodeToFileMap.put(2, new SstFileInfo("20", null, null, null)); - inodeToFileMap.put(4, new SstFileInfo("4", null, null, null)); - return inodeToFileMap; - } - return null; - }).when(spy).getSSTFileMapForSnapshot(Mockito.any(OmSnapshot.class), Mockito.anySet()); - doNothing().when(spy).recordActivity(any(), any()); - doNothing().when(spy).updateProgress(anyString(), anyDouble()); - String diffJobKey = snap1 + DELIMITER + snap2; - - Set deltaFiles = spy.getDeltaFiles(fromSnapshot, toSnapshot, Collections.emptySet(), snapshotInfo, - snapshotInfo, true, new TablePrefixInfo(Collections.emptyMap()), null, diffJobKey); - 
Assertions.assertEquals(Sets.newHashSet(fromSnapshotPath.resolve("3.sst").toAbsolutePath().toString(), - toSnapshotPath.resolve("4.sst").toAbsolutePath().toString()), deltaFiles); - } - - @Test - public void testGetSnapshotDiffReportHappyCase() throws Exception { - SnapshotInfo fromSnapInfo = snapshotInfo; - SnapshotInfo toSnapInfo = snapshotInfoList.get(0); - - Set testDeltaFiles = new HashSet<>(); - - SnapshotDiffManager spy = spy(snapshotDiffManager); - - doReturn(testDeltaFiles).when(spy).getDeltaFiles(any(OmSnapshot.class), - any(OmSnapshot.class), anySet(), eq(fromSnapInfo), eq(toSnapInfo), - eq(false), any(), anyString(), - anyString()); - - doReturn(testDeltaFiles).when(spy) - .getSSTFileSetForSnapshot(any(OmSnapshot.class), anySet()); - - doNothing().when(spy).addToObjectIdMap(eq(keyInfoTable), eq(keyInfoTable), - any(), anyBoolean(), any(), any(), any(), any(), any(), any(), anyString()); - doNothing().when(spy).checkReportsIntegrity(any(), anyInt(), anyInt()); - - doReturn(10L).when(spy).generateDiffReport(anyString(), - any(), any(), any(), any(), any(), any(), any(), - anyString(), anyString(), anyString(), anyString(), anyBoolean(), - any(), any(), any()); - doReturn(LEGACY).when(spy).getBucketLayout(VOLUME_NAME, BUCKET_NAME, - omMetadataManager); - - spy.getSnapshotDiffReport(VOLUME_NAME, BUCKET_NAME, fromSnapInfo.getName(), - toSnapInfo.getName(), 0, 1000, false, false); - - Thread.sleep(1000L); - spy.getSnapshotDiffReport(VOLUME_NAME, BUCKET_NAME, fromSnapInfo.getName(), - toSnapInfo.getName(), 0, 1000, false, false); - - SnapshotDiffJob snapDiffJob = getSnapshotDiffJobFromDb(fromSnapInfo, - toSnapInfo); - assertEquals(DONE, snapDiffJob.getStatus()); - assertEquals(10L, snapDiffJob.getTotalDiffEntries()); - } - /** * Tests that only QUEUED jobs are submitted to the executor and rest are * short-circuited based on previous one. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java new file mode 100644 index 000000000000..e8af3f84dd72 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java @@ -0,0 +1,726 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anySet; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.MockitoAnnotations; + +/** + * Unit tests for CompositeDeltaDiffComputer using Mockito.mockConstruction() + * to properly isolate and test fallback logic. + */ +public class TestCompositeDeltaDiffComputer { + + @TempDir + private Path tempDir; + + @Mock + private OmSnapshotManager omSnapshotManager; + + @Mock + private OMMetadataManager activeMetadataManager; + + @Mock + private OmSnapshotLocalDataManager localDataManager; + + @Mock + private RDBStore rdbStore; + + @Mock + private RocksDBCheckpointDiffer differ; + + @Mock + private Consumer activityReporter; + + private AutoCloseable mocks; + private Path deltaDirPath; + + @BeforeEach + public void setUp() throws IOException { + mocks = MockitoAnnotations.openMocks(this); + deltaDirPath = tempDir.resolve("delta"); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(localDataManager); + when(activeMetadataManager.getStore()).thenReturn(rdbStore); + when(rdbStore.getRocksDBCheckpointDiffer()).thenReturn(differ); + } + + @AfterEach + public void tearDown() throws Exception { + if (mocks != null) { + mocks.close(); + } + } + + /** + * Tests that RDBDifferComputer is created when fullDiff=false using mockConstruction. 
+ */ + @Test + public void testRDBDifferComputerCreatedWhenNotFullDiff() throws IOException { + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + // Verify RDBDifferComputer was constructed (fullDiff=false) + assertEquals(1, rdbDifferMock.constructed().size(), "RDBDifferComputer should be constructed"); + assertEquals(1, fullDiffMock.constructed().size(), "FullDiffComputer should always be constructed"); + + composite.close(); + } + } + + /** + * Tests that RDBDifferComputer is NOT created when fullDiff=true using mockConstruction. + */ + @Test + public void testRDBDifferComputerNotCreatedWhenFullDiff() throws IOException { + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, true, false); + + // Verify RDBDifferComputer was NOT constructed (fullDiff=true) + assertEquals(0, rdbDifferMock.constructed().size(), "RDBDifferComputer should NOT " + + "be constructed when fullDiff=true"); + assertEquals(1, fullDiffMock.constructed().size(), "FullDiffComputer should always be constructed"); + + composite.close(); + } + } + + /** + * Tests successful RDBDifferComputer computation without fallback. + */ + @Test + public void testSuccessfulRDBDifferComputationWithoutFallback() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + // Create expected results from RDBDiffer + Path sstFile1 = tempDir.resolve("rdb1.sst"); + Path sstFile2 = tempDir.resolve("rdb2.sst"); + Files.createFile(sstFile1); + Files.createFile(sstFile2); + SstFileInfo sstInfo1 = new SstFileInfo("rdb1.sst", "key1", "key2", "keyTable"); + SstFileInfo sstInfo2 = new SstFileInfo("rdb2.sst", "key3", "key4", "keyTable"); + Map> rdbDifferResult = new HashMap<>(); + rdbDifferResult.put(sstFile1, Pair.of(sstFile1, sstInfo1)); + rdbDifferResult.put(sstFile2, Pair.of(sstFile2, sstInfo2)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + // Make RDBDifferComputer return results successfully + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(rdbDifferResult)); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Verify RDBDiffer results are returned + assertTrue(result.isPresent(), "Result should be present from RDBDiffer"); + assertEquals(2, result.get().size(), "Should have 2 files from RDBDiffer"); + 
assertEquals(rdbDifferResult, result.get(), "Should return RDBDifferComputer result"); + + // Verify RDBDifferComputer was called but NOT FullDiffComputer + RDBDifferComputer rdbDifferInstance = rdbDifferMock.constructed().get(0); + verify(rdbDifferInstance, times(1)).computeDeltaFiles(any(), any(), anySet(), any()); + + // Verify FullDiffComputer was NEVER called (no fallback needed) + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + verify(fullDiffInstance, times(0)).computeDeltaFiles(any(), any(), anySet(), any()); + + // Verify only DAG_WALK status was reported (no FULL_DIFF) + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(SubStatus.class); + verify(activityReporter, times(1)).accept(statusCaptor.capture()); + assertEquals(SubStatus.SST_FILE_DELTA_DAG_WALK, statusCaptor.getValue(), + "Only DAG_WALK should be reported when RDBDiffer succeeds"); + + composite.close(); + } + } + + /** + * Tests successful RDBDifferComputer with single file. + */ + @Test + public void testSuccessfulRDBDifferWithSingleFile() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + Path sstFile = tempDir.resolve("single.sst"); + Files.createFile(sstFile); + SstFileInfo sstInfo = new SstFileInfo("single.sst", "key1", "key5", "keyTable"); + Map> rdbDifferResult = new HashMap<>(); + rdbDifferResult.put(sstFile, Pair.of(sstFile, sstInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(rdbDifferResult)); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertTrue(result.isPresent(), "Result should be present"); + assertEquals(1, result.get().size(), "Should have 1 file"); + + // Verify no fallback to FullDiff + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + verify(fullDiffInstance, times(0)).computeDeltaFiles(any(), any(), anySet(), any()); + + composite.close(); + } + } + + /** + * Tests successful RDBDifferComputer with multiple tables. 
+ */ + @Test + public void testSuccessfulRDBDifferWithMultipleTables() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable", "fileTable", "directoryTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of( + "keyTable", "a", "fileTable", "b", "directoryTable", "c")); + + // Create files for different tables + Path keyFile = tempDir.resolve("key1.sst"); + Path fileFile = tempDir.resolve("file1.sst"); + Path dirFile = tempDir.resolve("dir1.sst"); + Files.createFile(keyFile); + Files.createFile(fileFile); + Files.createFile(dirFile); + + SstFileInfo keyInfo = new SstFileInfo("key1.sst", "key1", "key2", "keyTable"); + SstFileInfo fileInfo = new SstFileInfo("file1.sst", "file1", "file2", "fileTable"); + SstFileInfo dirInfo = new SstFileInfo("dir1.sst", "dir1", "dir2", "directoryTable"); + + Map> rdbDifferResult = new HashMap<>(); + rdbDifferResult.put(keyFile, Pair.of(keyFile, keyInfo)); + rdbDifferResult.put(fileFile, Pair.of(fileFile, fileInfo)); + rdbDifferResult.put(dirFile, Pair.of(dirFile, dirInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(rdbDifferResult)); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertTrue(result.isPresent(), "Result should be present"); + assertEquals(3, result.get().size(), "Should have 3 files from different tables"); + + // Verify RDBDiffer handled all tables without fallback + RDBDifferComputer rdbDifferInstance = rdbDifferMock.constructed().get(0); + verify(rdbDifferInstance, times(1)).computeDeltaFiles(any(), any(), anySet(), any()); + + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + verify(fullDiffInstance, times(0)).computeDeltaFiles(any(), any(), anySet(), any()); + + composite.close(); + } + } + + /** + * Tests successful RDBDifferComputer returning empty map (no changes). 
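+ *
+ * <p>Note the distinction this suite relies on: an empty map inside a
+ * present {@code Optional} means "computed successfully, nothing changed",
+ * while {@code Optional.empty()} means "could not compute" and is what
+ * triggers the FullDiffComputer fallback exercised in the tests below.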
+ */ + @Test + public void testSuccessfulRDBDifferWithNoChanges() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + // RDBDiffer returns empty map (no differences, but successful computation) + Map> emptyResult = new HashMap<>(); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(emptyResult)); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Empty result is still a valid success case - no fallback needed + assertTrue(result.isPresent(), "Result should be present even if empty"); + assertEquals(0, result.get().size(), "Should have 0 files (no changes)"); + + // Verify no fallback occurred + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + verify(fullDiffInstance, times(0)).computeDeltaFiles(any(), any(), anySet(), any()); + + // Only DAG_WALK status should be reported + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(SubStatus.class); + verify(activityReporter, times(1)).accept(statusCaptor.capture()); + assertEquals(SubStatus.SST_FILE_DELTA_DAG_WALK, statusCaptor.getValue()); + + composite.close(); + } + } + + /** + * Tests fallback from RDBDifferComputer to FullDiffComputer using mockConstruction. 
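+ *
+ * <p>The fallback chain, as these tests assume it: the composite first
+ * reports SST_FILE_DELTA_DAG_WALK and asks RDBDifferComputer for a
+ * delta; an empty {@code Optional} makes it report
+ * SST_FILE_DELTA_FULL_DIFF and retry with FullDiffComputer, whose
+ * result is then returned unchanged.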
+ */ + @Test + public void testFallbackFromRDBDifferToFullDiff() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + // Create expected results + Path sstFile = tempDir.resolve("test.sst"); + Files.createFile(sstFile); + SstFileInfo sstInfo = new SstFileInfo("test.sst", "key1", "key2", "keyTable"); + Map> fullDiffResult = new HashMap<>(); + fullDiffResult.put(sstFile, Pair.of(sstFile, sstInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + // Make RDBDifferComputer return empty to trigger fallback + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.empty()); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class, + (mock, context) -> { + // Make FullDiffComputer return results + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(fullDiffResult)); + })) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Verify fallback occurred + assertTrue(result.isPresent(), "Result should be present from fallback"); + assertEquals(fullDiffResult, result.get(), "Should return FullDiffComputer result"); + + // Verify both computers were called + RDBDifferComputer rdbDifferInstance = rdbDifferMock.constructed().get(0); + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + + verify(rdbDifferInstance, times(1)).computeDeltaFiles(any(), any(), anySet(), any()); + verify(fullDiffInstance, times(1)).computeDeltaFiles(any(), any(), anySet(), any()); + + // Verify activity statuses were reported + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(SubStatus.class); + verify(activityReporter, times(2)).accept(statusCaptor.capture()); + List statuses = statusCaptor.getAllValues(); + assertEquals(SubStatus.SST_FILE_DELTA_DAG_WALK, statuses.get(0)); + assertEquals(SubStatus.SST_FILE_DELTA_FULL_DIFF, statuses.get(1)); + + composite.close(); + } + } + + /** + * Tests fallback on exception using mockConstruction. 
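+ *
+ * <p>Same chain as the previous test, but triggered by a
+ * RuntimeException rather than an empty result; the composite is
+ * assumed to catch the failure and fall through to the full diff.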
+ */ + @Test + public void testFallbackOnException() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + Path sstFile = tempDir.resolve("test2.sst"); + Files.createFile(sstFile); + SstFileInfo sstInfo = new SstFileInfo("test2.sst", "key3", "key4", "keyTable"); + Map> fullDiffResult = new HashMap<>(); + fullDiffResult.put(sstFile, Pair.of(sstFile, sstInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + // Make RDBDifferComputer throw exception to trigger fallback + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenThrow(new RuntimeException("Test exception")); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class, + (mock, context) -> { + // Make FullDiffComputer return results + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(fullDiffResult)); + })) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Verify fallback occurred + assertTrue(result.isPresent(), "Result should be present from fallback after exception"); + + // Verify activity statuses + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(SubStatus.class); + verify(activityReporter, times(2)).accept(statusCaptor.capture()); + List statuses = statusCaptor.getAllValues(); + assertEquals(SubStatus.SST_FILE_DELTA_DAG_WALK, statuses.get(0)); + assertEquals(SubStatus.SST_FILE_DELTA_FULL_DIFF, statuses.get(1)); + + composite.close(); + } + } + + /** + * Tests that FullDiffComputer is used directly when fullDiff=true. 
+ */ + @Test + public void testFullDiffOnlyMode() throws IOException { + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + Path sstFile = tempDir.resolve("test3.sst"); + Files.createFile(sstFile); + SstFileInfo sstInfo = new SstFileInfo("test3.sst", "key5", "key6", "keyTable"); + Map> fullDiffResult = new HashMap<>(); + fullDiffResult.put(sstFile, Pair.of(sstFile, sstInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class, + (mock, context) -> { + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(fullDiffResult)); + })) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, true, false); + + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Verify RDBDifferComputer was never constructed or called + assertEquals(0, rdbDifferMock.constructed().size(), "RDBDifferComputer should not be constructed"); + + // Verify FullDiffComputer was used + assertTrue(result.isPresent(), "Result should be present"); + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + verify(fullDiffInstance, times(1)).computeDeltaFiles(any(), any(), anySet(), any()); + + // Verify only FULL_DIFF status was reported + ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(SubStatus.class); + verify(activityReporter, times(1)).accept(statusCaptor.capture()); + assertEquals(SubStatus.SST_FILE_DELTA_FULL_DIFF, statusCaptor.getValue()); + + composite.close(); + } + } + + /** + * Tests proper cleanup of both computers. + */ + @Test + public void testCloseCallsBothComputers() throws IOException { + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + composite.close(); + + // Verify close was called on both + RDBDifferComputer rdbDifferInstance = rdbDifferMock.constructed().get(0); + FullDiffComputer fullDiffInstance = fullDiffMock.constructed().get(0); + + verify(rdbDifferInstance, times(1)).close(); + verify(fullDiffInstance, times(1)).close(); + } + } + + /** + * Tests that nonNativeDiff flag is properly passed to constructor. + * Verifies CompositeDeltaDiffComputer can be created with nonNativeDiff=true. 
+ */ + @Test + public void testNonNativeDiffFlagInConstructor() throws IOException { + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + // Create with nonNativeDiff = true + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, true); + + // Verify construction succeeds and both computers are created + assertEquals(1, rdbDifferMock.constructed().size(), "RDBDifferComputer should be created"); + assertEquals(1, fullDiffMock.constructed().size(), "FullDiffComputer should be created"); + + composite.close(); + } + } + + /** + * Tests that nonNativeDiff flag works correctly when disabled. + * Verifies CompositeDeltaDiffComputer can be created with nonNativeDiff=false. + */ + @Test + public void testNonNativeDiffDisabled() throws IOException { + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + // Create with nonNativeDiff = false (default behavior) + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + // Verify construction succeeds and both computers are created + assertEquals(1, rdbDifferMock.constructed().size(), "RDBDifferComputer should be created"); + assertEquals(1, fullDiffMock.constructed().size(), "FullDiffComputer should be created"); + + composite.close(); + } + } + + /** + * Tests nonNativeDiff mode with computeDeltaFiles - verifies fromSnapshot files are added. + * In nonNativeDiff mode, SST files from fromSnapshot are added to the delta to handle deletes. 
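+ *
+ * <p>Worked example of the expected composition (mirroring the stubs
+ * below): the RDB differ yields 1 delta file and the fromSnapshot
+ * contributes 2 SST files, so the combined delta should hold 3 entries,
+ * with the fromSnapshot files hard-linked into the delta directory
+ * (checked via inode equality).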
+ */ + @Test + public void testNonNativeDiffComputeDeltaFilesEnabled() throws IOException { + // Given nonNativeDiff is enabled and we have snapshots + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + // Setup fromSnapshot SST files + Path fromDbPath = tempDir.resolve("fromDb"); + Files.createDirectories(fromDbPath); + Path fromSstFile1 = fromDbPath.resolve("000001.sst"); + Path fromSstFile2 = fromDbPath.resolve("000002.sst"); + Files.createFile(fromSstFile1); + Files.createFile(fromSstFile2); + + SstFileInfo fromSstInfo1 = new SstFileInfo("000001", "a/key1", "a/key100", "keyTable"); + SstFileInfo fromSstInfo2 = new SstFileInfo("000002", "a/key101", "a/key200", "keyTable"); + Set fromSnapshotSstFiles = ImmutableSet.of(fromSstInfo1, fromSstInfo2); + + // Mock fromSnapshot + OmSnapshot fromSnap = org.mockito.Mockito.mock(OmSnapshot.class); + OMMetadataManager fromMetaMgr = org.mockito.Mockito.mock(OMMetadataManager.class); + RDBStore fromRdbStore = org.mockito.Mockito.mock(RDBStore.class); + when(fromSnap.getMetadataManager()).thenReturn(fromMetaMgr); + when(fromMetaMgr.getStore()).thenReturn(fromRdbStore); + when(fromRdbStore.getDbLocation()).thenReturn(fromDbPath.toFile()); + + @SuppressWarnings("unchecked") + UncheckedAutoCloseableSupplier fromSnapSupplier = + (UncheckedAutoCloseableSupplier) org.mockito.Mockito.mock(UncheckedAutoCloseableSupplier.class); + when(fromSnapSupplier.get()).thenReturn(fromSnap); + when(omSnapshotManager.getActiveSnapshot(eq("vol1"), eq("bucket1"), eq("snap1"))) + .thenReturn(fromSnapSupplier); + + // Mock RDBDifferComputer to return a result + Map> rdbDifferResult = new HashMap<>(); + Path toSstFile = tempDir.resolve("000003.sst"); + Files.createFile(toSstFile); + SstFileInfo toSstInfo = new SstFileInfo("000003.sst", "a/key1", "a/key50", "keyTable"); + rdbDifferResult.put(toSstFile, Pair.of(deltaDirPath.resolve("000003.sst"), toSstInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(rdbDifferResult)); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class); + MockedStatic fullDiffStaticMock = mockStatic(FullDiffComputer.class)) { + + // Mock the static method getSSTFileSetForSnapshot + fullDiffStaticMock.when(() -> FullDiffComputer.getSSTFileSetForSnapshot(any(), anySet(), any())) + .thenReturn(fromSnapshotSstFiles); + + // When we create CompositeDeltaDiffComputer with nonNativeDiff=true + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, true); + + // Then computeDeltaFiles should complete successfully and include fromSnapshot files + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Result should be present with both RDBDiffer result AND fromSnapshot files + assertTrue(result.isPresent(), "Result should be present"); + Map> deltaFiles = result.get(); + + // Should have 1 from RDBDiffer + 2 from fromSnapshot = 3 total + assertEquals(3, 
deltaFiles.size(), + "Should have 3 files (1 RDBDiffer + 2 fromSnapshot), got: " + deltaFiles.size()); + assertEquals(ImmutableSet.of(fromSstFile1, fromSstFile2, toSstFile), deltaFiles.keySet()); + Map infoMap = ImmutableMap.of(fromSstFile1, fromSstInfo1, fromSstFile2, fromSstInfo2, + toSstFile, toSstInfo); + for (Map.Entry> entry : deltaFiles.entrySet()) { + assertEquals(infoMap.get(entry.getKey()), entry.getValue().getRight()); + assertEquals(deltaDirPath.toAbsolutePath(), entry.getValue().getLeft().toAbsolutePath().getParent()); + } + assertEquals(getINode(fromSstFile1), getINode(deltaFiles.get(fromSstFile1).getLeft())); + assertEquals(getINode(fromSstFile2), getINode(deltaFiles.get(fromSstFile2).getLeft())); + + composite.close(); + } + } + + /** + * Tests nonNativeDiff mode disabled with computeDeltaFiles. + * Verifies normal behavior when nonNativeDiff=false. + */ + @Test + public void testNonNativeDiffComputeDeltaFilesDisabled() throws IOException { + // Given nonNativeDiff is disabled + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = new TablePrefixInfo(ImmutableMap.of("keyTable", "a")); + + // Mock RDBDifferComputer to return a result + Map> rdbDifferResult = new HashMap<>(); + Path sstFile = tempDir.resolve("000001.sst"); + Files.createFile(sstFile); + SstFileInfo sstInfo = new SstFileInfo("000001.sst", "a/key1", "a/key50", "keyTable"); + rdbDifferResult.put(sstFile, Pair.of(deltaDirPath.resolve("000001.sst"), sstInfo)); + + try (MockedConstruction rdbDifferMock = mockConstruction(RDBDifferComputer.class, + (mock, context) -> { + when(mock.computeDeltaFiles(any(), any(), anySet(), any())) + .thenReturn(Optional.of(rdbDifferResult)); + }); + MockedConstruction fullDiffMock = mockConstruction(FullDiffComputer.class)) { + + // When we create CompositeDeltaDiffComputer with nonNativeDiff=false + CompositeDeltaDiffComputer composite = new CompositeDeltaDiffComputer( + omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter, false, false); + + // Then computeDeltaFiles should complete successfully with RDBDiffer result + Optional>> result = + composite.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + // Result should contain RDBDiffer result + assertTrue(result.isPresent(), "Result should be present"); + Map> deltaFiles = result.get(); + assertEquals(1, deltaFiles.size(), "Should have RDBDiffer result"); + assertTrue(deltaFiles.containsKey(sstFile), "Should contain the SST file"); + + composite.close(); + } + } + + // Helper methods + + private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName, + String snapshotName, UUID snapshotId) { + SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setName(snapshotName) + .setSnapshotId(snapshotId) + .setDbTxSequenceNumber(100L); + return builder.build(); + } +} + diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java new file mode 100644 index 000000000000..b4ba058a43c2 --- /dev/null +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java @@ -0,0 +1,535 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.diff.delta; + +import static org.apache.hadoop.hdds.utils.IOUtils.getINode; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anySet; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.TreeMap; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.TablePrefixInfo; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.SubStatus; +import org.apache.ozone.rocksdb.util.SstFileInfo; +import org.apache.ozone.rocksdiff.DifferSnapshotInfo; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +/** + * Unit tests for RDBDifferComputer. 
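+ *
+ * <p>Contract assumed by this suite: RDBDifferComputer derives a
+ * DifferSnapshotInfo for each end of the diff from snapshot local data,
+ * delegates to RocksDBCheckpointDiffer#getSSTDiffListWithFullPath, and
+ * hard-links the resulting SST files into its delta directory before
+ * handing them back.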
+ */ +public class TestRDBDifferComputer { + + @TempDir + private Path tempDir; + + @Mock + private OmSnapshotManager omSnapshotManager; + + @Mock + private OMMetadataManager activeMetadataManager; + + @Mock + private OmSnapshotLocalDataManager localDataManager; + + @Mock + private RDBStore rdbStore; + + @Mock + private RocksDBCheckpointDiffer differ; + + @Mock + private Consumer activityReporter; + + private AutoCloseable mocks; + private Path deltaDirPath; + private RDBDifferComputer rdbDifferComputer; + + @BeforeEach + public void setUp() throws IOException { + mocks = MockitoAnnotations.openMocks(this); + deltaDirPath = tempDir.resolve("delta"); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(localDataManager); + when(activeMetadataManager.getStore()).thenReturn(rdbStore); + when(rdbStore.getRocksDBCheckpointDiffer()).thenReturn(differ); + } + + @AfterEach + public void tearDown() throws Exception { + if (rdbDifferComputer != null) { + rdbDifferComputer.close(); + } + if (mocks != null) { + mocks.close(); + } + } + + /** + * Tests that the constructor creates RDBDifferComputer successfully with differ. + */ + @Test + public void testConstructorWithDiffer() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertNotNull(rdbDifferComputer, "RDBDifferComputer should be created"); + assertTrue(Files.exists(deltaDirPath), "Delta directory should be created"); + verify(activeMetadataManager, times(1)).getStore(); + verify(rdbStore, times(1)).getRocksDBCheckpointDiffer(); + } + + /** + * Tests constructor when differ is null (fallback scenario). + */ + @Test + public void testConstructorWithNullDiffer() throws IOException { + when(rdbStore.getRocksDBCheckpointDiffer()).thenReturn(null); + + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertNotNull(rdbDifferComputer, "RDBDifferComputer should be created even with null differ"); + assertTrue(Files.exists(deltaDirPath), "Delta directory should be created"); + } + + /** + * Tests computeDeltaFiles with successful differ computation. 
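+ *
+ * <p>The returned paths are expected to be hard links (same inode) of
+ * the original SST files, so the delta is assembled without copying data.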
+ */ + @Test + public void testComputeDeltaFilesWithDiffer() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = createMockSnapshotLocalData(fromSnapshotId, 1); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalData(toSnapshotId, 2); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(toSnapshotId, fromSnapshotId)).thenReturn(snapProvider); + + // Create mock SST files + Path sstFile1 = tempDir.resolve("sst1.sst"); + Path sstFile2 = tempDir.resolve("sst2.sst"); + Files.createFile(sstFile1); + Files.createFile(sstFile2); + + SstFileInfo sstFileInfo1 = new SstFileInfo("sst1.sst", "key1", "key2", "keyTable"); + SstFileInfo sstFileInfo2 = new SstFileInfo("sst2.sst", "key3", "key4", "keyTable"); + + Map differResult = new HashMap<>(); + differResult.put(sstFile1, sstFileInfo1); + differResult.put(sstFile2, sstFileInfo2); + + when(differ.getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + any(Map.class), any(TablePrefixInfo.class), anySet())).thenReturn(Optional.of(differResult)); + + Optional>> result = + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertTrue(result.isPresent(), "Result should be present"); + assertEquals(2, result.get().size(), "Should have 2 delta files"); + assertTrue(result.get().containsKey(sstFile1), "Should contain first SST file"); + assertTrue(result.get().containsKey(sstFile2), "Should contain second SST file"); + + // Verify links were created in delta directory + for (Map.Entry> entry : result.get().entrySet()) { + Path actualPath = entry.getKey(); + Path link = entry.getValue().getLeft(); + assertEquals(differResult.get(actualPath), entry.getValue().getValue()); + assertTrue(link.startsWith(deltaDirPath), "Link should be in delta directory"); + assertTrue(Files.exists(link), "Link should exist"); + assertEquals(getINode(actualPath), getINode(link)); + } + + verify(snapProvider, times(1)).close(); + } + + /** + * Tests computeDeltaFiles when differ returns empty. 
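+ *
+ * <p>An empty {@code Optional} is assumed to propagate to the caller
+ * untouched; CompositeDeltaDiffComputer uses exactly this signal to
+ * decide whether to fall back to a full diff.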
+ */ + @Test + public void testComputeDeltaFilesWithEmptyDifferResult() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = createMockSnapshotLocalData(fromSnapshotId, 1); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalData(toSnapshotId, 2); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(toSnapshotId, fromSnapshotId)).thenReturn(snapProvider); + + when(differ.getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + any(Map.class), any(TablePrefixInfo.class), anySet())).thenReturn(Optional.empty()); + + Optional>> result = + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertFalse(result.isPresent(), "Result should be empty when differ returns empty"); + verify(snapProvider, times(1)).close(); + } + + /** + * Tests computeDeltaFiles when differ is null. + */ + @Test + public void testComputeDeltaFilesWithNullDiffer() throws IOException { + when(rdbStore.getRocksDBCheckpointDiffer()).thenReturn(null); + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", UUID.randomUUID()); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", UUID.randomUUID()); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + Optional>> result = + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertFalse(result.isPresent(), "Result should be empty when differ is null"); + } + + /** + * Tests computeDeltaFiles with multiple tables. 
+ */ + @Test + public void testComputeDeltaFilesWithMultipleTables() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable", "fileTable", "directoryTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = createMockSnapshotLocalData(fromSnapshotId, 1); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalData(toSnapshotId, 2); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(toSnapshotId, fromSnapshotId)).thenReturn(snapProvider); + + // Create mock SST files for different tables + Path sstFile1 = tempDir.resolve("key1.sst"); + Path sstFile2 = tempDir.resolve("file1.sst"); + Path sstFile3 = tempDir.resolve("dir1.sst"); + Files.createFile(sstFile1); + Files.createFile(sstFile2); + Files.createFile(sstFile3); + + SstFileInfo sstFileInfo1 = new SstFileInfo("key1.sst", "key1", "key2", "keyTable"); + SstFileInfo sstFileInfo2 = new SstFileInfo("file1.sst", "file1", "file2", "fileTable"); + SstFileInfo sstFileInfo3 = new SstFileInfo("dir1.sst", "dir1", "dir2", "directoryTable"); + + Map differResult = new HashMap<>(); + differResult.put(sstFile1, sstFileInfo1); + differResult.put(sstFile2, sstFileInfo2); + differResult.put(sstFile3, sstFileInfo3); + + when(differ.getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + any(Map.class), any(TablePrefixInfo.class), anySet())).thenReturn(Optional.of(differResult)); + + Optional>> result = + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertTrue(result.isPresent(), "Result should be present"); + assertEquals(3, result.get().size(), "Should have 3 delta files from different tables"); + } + + /** + * Tests computeDeltaFiles with version mapping. 
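+ *
+ * <p>Worked example, as built by createMockSnapshotLocalDataWithVersions:
+ * versions 0..2 exist and each version i maps to previous-snapshot
+ * version (i - 1), floored at 0, so the version map handed to the
+ * differ is expected to be {0=0, 1=0, 2=1}.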
+ */ + @Test + public void testComputeDeltaFilesWithVersionMapping() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data with version mapping + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = createMockSnapshotLocalData(fromSnapshotId, 1); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalDataWithVersions(toSnapshotId, 2); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(toSnapshotId, fromSnapshotId)).thenReturn(snapProvider); + + Path sstFile = tempDir.resolve("sst1.sst"); + Files.createFile(sstFile); + SstFileInfo sstFileInfo = new SstFileInfo("sst1.sst", "key1", "key2", "keyTable"); + + Map differResult = new HashMap<>(); + differResult.put(sstFile, sstFileInfo); + + when(differ.getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + any(Map.class), any(TablePrefixInfo.class), anySet())).thenReturn(Optional.of(differResult)); + + Optional>> result = + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + + assertTrue(result.isPresent(), "Result should be present"); + + // Verify that version map was passed to differ + ArgumentCaptor> versionMapCaptor = ArgumentCaptor.forClass(Map.class); + verify(differ).getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + versionMapCaptor.capture(), any(TablePrefixInfo.class), anySet()); + + Map capturedVersionMap = versionMapCaptor.getValue(); + assertNotNull(capturedVersionMap, "Version map should not be null"); + assertEquals(ImmutableMap.of(0, 0, 1, 0, 2, 1), capturedVersionMap); + } + + /** + * Tests that getDSIFromSI throws exception when no versions found. 
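+ *
+ * <p>Rationale: with an empty getVersionSstFileInfos() map there is no
+ * checkpoint version to build a DifferSnapshotInfo from, so the
+ * computation is assumed to surface this as an IOException rather than
+ * silently returning an empty delta.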
+ */ + @Test + public void testGetDSIFromSIWithNoVersions() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", snapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", UUID.randomUUID()); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data with empty versions + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = mock(OmSnapshotLocalData.class); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalData(UUID.randomUUID(), 1); + + when(fromSnapshotLocalData.getSnapshotId()).thenReturn(snapshotId); + when(fromSnapshotLocalData.getVersionSstFileInfos()).thenReturn(Collections.emptyMap()); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(any(UUID.class), any(UUID.class))).thenReturn(snapProvider); + + assertThrows(IOException.class, () -> + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo), + "Should throw IOException when no versions found"); + } + + /** + * Tests that close properly cleans up resources. + */ + @Test + public void testClose() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + assertTrue(Files.exists(deltaDirPath), "Delta directory should exist"); + + rdbDifferComputer.close(); + + assertFalse(Files.exists(deltaDirPath), "Delta directory should be cleaned up after close"); + } + + /** + * Tests computeDeltaFiles with IOException from differ. 
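+ *
+ * <p>Also verifies that the local-data provider is closed on the
+ * exception path, i.e. cleanup is assumed to happen in a
+ * try-with-resources or finally block.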
+ */ + @Test + public void testComputeDeltaFilesWithIOException() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = createMockSnapshotLocalData(fromSnapshotId, 1); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalData(toSnapshotId, 2); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(toSnapshotId, fromSnapshotId)).thenReturn(snapProvider); + + when(differ.getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + any(Map.class), any(TablePrefixInfo.class), anySet())) + .thenThrow(new IOException("Test exception")); + + assertThrows(IOException.class, () -> + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo), + "Should propagate IOException from differ"); + + verify(snapProvider, times(1)).close(); + } + + /** + * Tests that differ operations are synchronized. + */ + @Test + public void testDifferSynchronization() throws IOException { + rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, + deltaDirPath, activityReporter); + + UUID fromSnapshotId = UUID.randomUUID(); + UUID toSnapshotId = UUID.randomUUID(); + SnapshotInfo fromSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap1", fromSnapshotId); + SnapshotInfo toSnapshot = createMockSnapshotInfo("vol1", "bucket1", "snap2", toSnapshotId); + Set tablesToLookup = ImmutableSet.of("keyTable"); + TablePrefixInfo tablePrefixInfo = mock(TablePrefixInfo.class); + + // Mock snapshot local data + ReadableOmSnapshotLocalDataProvider snapProvider = mock(ReadableOmSnapshotLocalDataProvider.class); + OmSnapshotLocalData fromSnapshotLocalData = createMockSnapshotLocalData(fromSnapshotId, 1); + OmSnapshotLocalData toSnapshotLocalData = createMockSnapshotLocalData(toSnapshotId, 2); + + when(snapProvider.getPreviousSnapshotLocalData()).thenReturn(fromSnapshotLocalData); + when(snapProvider.getSnapshotLocalData()).thenReturn(toSnapshotLocalData); + when(localDataManager.getOmSnapshotLocalData(toSnapshotId, fromSnapshotId)).thenReturn(snapProvider); + + when(differ.getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), any(DifferSnapshotInfo.class), + any(Map.class), any(TablePrefixInfo.class), anySet())).thenReturn(Optional.empty()); + + // Multiple calls should work correctly (synchronized access to differ) + for (int i = 0; i < 3; i++) { + Optional>> result = + rdbDifferComputer.computeDeltaFiles(fromSnapshot, toSnapshot, tablesToLookup, tablePrefixInfo); + assertFalse(result.isPresent(), "Result should be empty"); + } + + verify(differ, times(3)).getSSTDiffListWithFullPath(any(DifferSnapshotInfo.class), + any(DifferSnapshotInfo.class), any(Map.class), any(TablePrefixInfo.class), anySet()); + } + + // Helper 
methods + + private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName, + String snapshotName, UUID snapshotId) { + SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setName(snapshotName) + .setSnapshotId(snapshotId) + .setDbTxSequenceNumber(100L); + return builder.build(); + } + + private OmSnapshotLocalData createMockSnapshotLocalData(UUID snapshotId, int version) { + OmSnapshotLocalData localData = mock(OmSnapshotLocalData.class); + when(localData.getSnapshotId()).thenReturn(snapshotId); + + // Create version SST file info + List sstFiles = new ArrayList<>(); + sstFiles.add(new SstFileInfo("file1.sst", "key1", "key2", "keyTable")); + + VersionMeta versionMeta = new VersionMeta(0, sstFiles); + Map versionMap = new TreeMap<>(); + versionMap.put(version, versionMeta); + + when(localData.getVersionSstFileInfos()).thenReturn(versionMap); + when(localData.getVersion()).thenReturn(version); + + return localData; + } + + private OmSnapshotLocalData createMockSnapshotLocalDataWithVersions(UUID snapshotId, int version) { + OmSnapshotLocalData localData = mock(OmSnapshotLocalData.class); + when(localData.getSnapshotId()).thenReturn(snapshotId); + + // Create multiple versions + Map versionMap = new TreeMap<>(); + for (int i = 0; i <= version; i++) { + List sstFiles = new ArrayList<>(); + sstFiles.add(new SstFileInfo("file" + i + ".sst", "key" + i, "key" + (i + 1), "keyTable")); + VersionMeta versionMeta = new VersionMeta(i > 0 ? i - 1 : 0, sstFiles); + versionMap.put(i, versionMeta); + } + + when(localData.getVersionSstFileInfos()).thenReturn(versionMap); + when(localData.getVersion()).thenReturn(version); + + return localData; + } +} + + + + + From 9b2d4f89732919d5b234160928c7313339dc17ed Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 17 Nov 2025 09:59:49 -0500 Subject: [PATCH 117/126] HDDS-13929. 
Fix PMD Change-Id: I857831bebe3a733d28998606cbf3f1b0676b2ba5 --- .../hadoop/ozone/om/OmSnapshotManager.java | 4 +-- .../om/snapshot/SnapshotDiffManager.java | 19 -------------- .../om/snapshot/TestSnapshotDiffManager.java | 26 ++----------------- 3 files changed, 4 insertions(+), 45 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 3e6ccf771dc8..b79282b1aafa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -312,8 +312,8 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { this.snapshotCache = new SnapshotCache(loader, softCacheSize, ozoneManager.getMetrics(), cacheCleanupServiceInterval, compactNonSnapshotDiffTables, ozoneManager.getMetadataManager().getLock()); - this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ, - ozoneManager, snapshotLocalDataManager, snapDiffJobCf, snapDiffReportCf, + this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, + ozoneManager, snapDiffJobCf, snapDiffReportCf, columnFamilyOptions, codecRegistry); diffCleanupServiceInterval = ozoneManager.getConfiguration() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 8271dd6315b2..fa26953af2f5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -152,7 +152,6 @@ public class SnapshotDiffManager implements AutoCloseable { private static final String MODIFY_DIFF_TABLE_SUFFIX = "-modify-diff"; private final ManagedRocksDB db; - private final RocksDBCheckpointDiffer differ; private final OzoneManager ozoneManager; private final OMMetadataManager activeOmMetadataManager; private final CodecRegistry codecRegistry; @@ -196,22 +195,17 @@ public class SnapshotDiffManager implements AutoCloseable { (SnapshotInfo fromSnapshotInfo, SnapshotInfo toSnapshotInfo) -> fromSnapshotInfo.getSnapshotId() + DELIMITER + toSnapshotInfo.getSnapshotId(); - private final OmSnapshotLocalDataManager snapshotLocalDataManager; @SuppressWarnings("parameternumber") public SnapshotDiffManager(ManagedRocksDB db, - RocksDBCheckpointDiffer differ, OzoneManager ozoneManager, - OmSnapshotLocalDataManager snapshotLocalDataManager, ColumnFamilyHandle snapDiffJobCfh, ColumnFamilyHandle snapDiffReportCfh, ManagedColumnFamilyOptions familyOptions, CodecRegistry codecRegistry) { this.db = db; - this.differ = differ; this.ozoneManager = ozoneManager; this.activeOmMetadataManager = ozoneManager.getMetadataManager(); - this.snapshotLocalDataManager = snapshotLocalDataManager; this.familyOptions = familyOptions; this.codecRegistry = codecRegistry; this.defaultWaitTime = ozoneManager.getConfiguration().getTimeDuration( @@ -340,19 +334,6 @@ private void createEmptySnapDiffDir(Path path) { } } - private void deleteDir(Path path) { - if (path == null || Files.notExists(path)) { - return; - } - - try { - PathUtils.deleteDirectory(path); - } catch (IOException e) { - // TODO: [SNAPSHOT] Fail gracefully - throw new IllegalStateException(e); - } - } - /** * Gets 
the report key for a particular index of snapshot diff job. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index c50d32e5b9b2..440081f269a7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -140,7 +140,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ExitUtil; import org.apache.ozone.rocksdb.util.SstFileSetReader; -import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; import org.junit.jupiter.api.AfterEach; @@ -162,7 +161,6 @@ import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; -import org.rocksdb.RocksIterator; /** * Tests for SnapshotDiffManager. @@ -189,10 +187,6 @@ public class TestSnapshotDiffManager { private final OMMetrics omMetrics = OMMetrics.create(); @TempDir private File dbDir; - @TempDir - private File snapDiffDir; - @Mock - private RocksDBCheckpointDiffer differ; @Mock private OMMetadataManager omMetadataManager; @Mock @@ -210,15 +204,6 @@ public class TestSnapshotDiffManager { @Mock private RDBStore dbStore; - @Mock - private RocksIterator jobTableIterator; - - @Mock - private OmSnapshotLocalDataManager localDataManager; - - @Mock - private OmSnapshotManager omSnapshotManager; - private static CodecRegistry codecRegistry; private final BiFunction @@ -268,7 +253,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { String snapshotNamePrefix = "snap-"; String snapshotPath = "snapshotPath"; - String snapshotCheckpointDir = "snapshotCheckpointDir"; UUID baseSnapshotId = UUID.randomUUID(); String baseSnapshotName = snapshotNamePrefix + baseSnapshotId; snapshotInfo = new SnapshotInfo.Builder() @@ -354,7 +338,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getConfiguration()).thenReturn(configuration); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - omSnapshotManager = mock(OmSnapshotManager.class); + OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics, 0, true, new OmReadOnlyLock()); @@ -366,7 +350,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { return snapshotCache.get(snapInfo.getSnapshotId()); }); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, localDataManager, + snapshotDiffManager = new SnapshotDiffManager(db, ozoneManager, snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); when(omSnapshotManager.getDiffCleanupServiceInterval()).thenReturn(0L); } @@ -397,12 +381,6 @@ private OmSnapshot getMockedOmSnapshot(UUID snapshotId) { return omSnapshot; } - private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { - SnapshotInfo snapInfo = mock(SnapshotInfo.class); - when(snapInfo.getSnapshotId()).thenReturn(snapshotId); - return snapInfo; - } - private Table getMockedTable( Map map, String 
tableName) throws IOException { From a495846554b0c5eba467822aa7d4344aa4304cf3 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 17 Nov 2025 11:11:33 -0500 Subject: [PATCH 118/126] HDDS-13949. Move dbTxSequenceNumber from SnapshotInfo to LocalDataYaml file Change-Id: I714b025ef88c34a2753ca24aeeaf072b0d505ea5 --- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../hadoop/ozone/om/helpers/SnapshotInfo.java | 13 -------- .../ozone/om/helpers/TestOmSnapshotInfo.java | 5 ---- .../hadoop/ozone/freon/TestOMSnapshotDAG.java | 3 +- .../src/main/proto/OmClientProtocol.proto | 2 +- .../hadoop/ozone/om/OmSnapshotLocalData.java | 11 ++++++- .../ozone/om/OmSnapshotLocalDataYaml.java | 3 +- .../snapshot/OMSnapshotCreateRequest.java | 7 ----- .../snapshot/OmSnapshotLocalDataManager.java | 17 +++++++---- .../diff/delta/RDBDifferComputer.java | 2 +- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 3 +- .../TestOmSnapshotLocalDataManager.java | 30 ++++++++++++------- 12 files changed, 50 insertions(+), 47 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index aecbdfae615d..1152066408ef 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -222,6 +222,7 @@ public final class OzoneConsts { public static final String OM_SST_FILE_INFO_END_KEY = "endKey"; public static final String OM_SST_FILE_INFO_COL_FAMILY = "columnFamily"; public static final String OM_SLD_TXN_INFO = "transactionInfo"; + public static final String OM_SLD_DB_TXN_SEQ_NUMBER = "dbTxSequenceNumber"; // YAML fields for .container files public static final String CONTAINER_ID = "containerID"; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index a26422cd81fb..20de1b16496a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -74,7 +74,6 @@ public final class SnapshotInfo implements Auditable, CopyObject { /** * RocksDB's transaction sequence number at the time of checkpoint creation. 
*/ - private long dbTxSequenceNumber; private boolean deepClean; private boolean sstFiltered; private long referencedSize; @@ -98,7 +97,6 @@ private SnapshotInfo(Builder b) { this.pathPreviousSnapshotId = b.pathPreviousSnapshotId; this.globalPreviousSnapshotId = b.globalPreviousSnapshotId; this.snapshotPath = b.snapshotPath; - this.dbTxSequenceNumber = b.dbTxSequenceNumber; this.deepClean = b.deepClean; this.sstFiltered = b.sstFiltered; this.referencedSize = b.referencedSize; @@ -221,7 +219,6 @@ public SnapshotInfo.Builder toBuilder() { .setPathPreviousSnapshotId(pathPreviousSnapshotId) .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) .setSnapshotPath(snapshotPath) - .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean) .setSstFiltered(sstFiltered) .setReferencedSize(referencedSize) @@ -441,7 +438,6 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { } sib.setSnapshotPath(snapshotPath) - .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean); return sib.build(); } @@ -558,14 +554,6 @@ public String getCheckpointDirName(int version) { return getCheckpointDirName(getSnapshotId(), version); } - public long getDbTxSequenceNumber() { - return dbTxSequenceNumber; - } - - public void setDbTxSequenceNumber(long dbTxSequenceNumber) { - this.dbTxSequenceNumber = dbTxSequenceNumber; - } - /** * Get the table key for this snapshot. */ @@ -751,7 +739,6 @@ public String toString() { ", pathPreviousSnapshotId: '" + pathPreviousSnapshotId + '\'' + ", globalPreviousSnapshotId: '" + globalPreviousSnapshotId + '\'' + ", snapshotPath: '" + snapshotPath + '\'' + - ", dbTxSequenceNumber: '" + dbTxSequenceNumber + '\'' + ", deepClean: '" + deepClean + '\'' + ", sstFiltered: '" + sstFiltered + '\'' + ", referencedSize: '" + referencedSize + '\'' + diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index e7695debd619..30f7e475c211 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -45,7 +45,6 @@ public class TestOmSnapshotInfo { private static final UUID GLOBAL_PREVIOUS_SNAPSHOT_ID = PATH_PREVIOUS_SNAPSHOT_ID; private static final String SNAPSHOT_PATH = "test/path"; - private static final long DB_TX_SEQUENCE_NUMBER = 12345L; private SnapshotInfo createSnapshotInfo() { return new SnapshotInfo.Builder() @@ -59,7 +58,6 @@ private SnapshotInfo createSnapshotInfo() { .setPathPreviousSnapshotId(PATH_PREVIOUS_SNAPSHOT_ID) .setGlobalPreviousSnapshotId(GLOBAL_PREVIOUS_SNAPSHOT_ID) .setSnapshotPath(SNAPSHOT_PATH) - .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) .setDeepClean(false) .setSstFiltered(false) .setReferencedSize(2000L) @@ -84,7 +82,6 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() { .setPathPreviousSnapshotID(toProtobuf(PATH_PREVIOUS_SNAPSHOT_ID)) .setGlobalPreviousSnapshotID(toProtobuf(GLOBAL_PREVIOUS_SNAPSHOT_ID)) .setSnapshotPath(SNAPSHOT_PATH) - .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) .setDeepClean(false) .setSstFiltered(false) .setReferencedSize(2000L) @@ -164,8 +161,6 @@ public void testSnapshotInfoProtoToSnapshotInfo() { snapshotInfoActual.getBucketName()); assertEquals(snapshotInfoExpected.getSnapshotStatus(), snapshotInfoActual.getSnapshotStatus()); - assertEquals(snapshotInfoExpected.getDbTxSequenceNumber(), - 
snapshotInfoActual.getDbTxSequenceNumber()); assertEquals(snapshotInfoExpected.isDeepCleaned(), snapshotInfoActual.isDeepCleaned()); assertEquals(snapshotInfoExpected.isSstFiltered(), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index 3901eeeb0e4d..c421e02705c8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -161,7 +161,8 @@ private DifferSnapshotVersion getDifferSnapshotInfo( throw new IllegalStateException(String.format("Duplicate key %s", u)); }, TreeMap::new)); DifferSnapshotInfo dsi = new DifferSnapshotInfo((version) -> Paths.get(checkpointPath), - snapshotInfo.getSnapshotId(), snapshotInfo.getDbTxSequenceNumber(), versionSstFiles); + snapshotInfo.getSnapshotId(), snapshotLocalData.getSnapshotLocalData().getDbTxSequenceNumber(), + versionSstFiles); return new DifferSnapshotVersion(dsi, 0, COLUMN_FAMILIES_TO_TRACK_IN_DAG); } } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 1e5675f612e6..791ae12b86d3 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -881,7 +881,7 @@ message SnapshotInfo { optional hadoop.hdds.UUID globalPreviousSnapshotID = 9; optional string snapshotPath = 10; optional string checkpointDir = 11 [deprecated = true]; - optional int64 dbTxSequenceNumber = 12; + optional int64 dbTxSequenceNumber = 12 [deprecated = true]; optional bool deepClean = 13; optional bool sstFiltered = 14; // snapshot reference size before any key replication or EC diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 91ec8b673a89..b3e9452642ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -67,6 +67,9 @@ public class OmSnapshotLocalData implements WithChecksum { // Stores the transactionInfo corresponding to OM when the snaphot is purged. private TransactionInfo transactionInfo; + // Stores the rocksDB's transaction sequence number at the time of snapshot creation.' + private long dbTxSequenceNumber; + // Map of version to VersionMeta, using linkedHashMap since the order of the map needs to be deterministic for // checksum computation. private final LinkedHashMap versionSstFileInfos; @@ -78,7 +81,7 @@ public class OmSnapshotLocalData implements WithChecksum { * Creates a OmSnapshotLocalData object with default values. 
*/ public OmSnapshotLocalData(UUID snapshotId, List notDefraggedSSTFileList, UUID previousSnapshotId, - TransactionInfo transactionInfo) { + TransactionInfo transactionInfo, long dbTxSequenceNumber) { this.snapshotId = snapshotId; this.isSSTFiltered = false; this.lastDefragTime = 0L; @@ -89,9 +92,14 @@ public OmSnapshotLocalData(UUID snapshotId, List notDefraggedS this.version = 0; this.previousSnapshotId = previousSnapshotId; this.transactionInfo = transactionInfo; + this.dbTxSequenceNumber = dbTxSequenceNumber; setChecksumTo0ByteArray(); } + public long getDbTxSequenceNumber() { + return dbTxSequenceNumber; + } + /** * Copy constructor to create a deep copy of OmSnapshotLocalData object. * @param source The source OmSnapshotLocalData to copy from @@ -108,6 +116,7 @@ public OmSnapshotLocalData(OmSnapshotLocalData source) { this.versionSstFileInfos = new LinkedHashMap<>(); setVersionSstFileInfos(source.versionSstFileInfos); this.transactionInfo = source.transactionInfo; + this.dbTxSequenceNumber = source.dbTxSequenceNumber; } public TransactionInfo getTransactionInfo() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index ad8046d719e0..b72e74cf4a6b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -172,9 +172,10 @@ public Object construct(Node node) { final String prevSnapIdStr = (String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID); UUID prevSnapId = prevSnapIdStr != null ? UUID.fromString(prevSnapIdStr) : null; final String purgeTxInfoStr = (String) nodes.get(OzoneConsts.OM_SLD_TXN_INFO); + final long dbTxnSeqNumber = ((Number)nodes.get(OzoneConsts.OM_SLD_DB_TXN_SEQ_NUMBER)).longValue(); TransactionInfo transactionInfo = purgeTxInfoStr != null ? TransactionInfo.valueOf(purgeTxInfoStr) : null; OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId, - transactionInfo); + transactionInfo, dbTxnSeqNumber); // Set version from YAML Integer version = (Integer) nodes.get(OzoneConsts.OM_SLD_VERSION); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 6211d4114005..07a8aeed3139 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -167,12 +166,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut throw new OMException("Snapshot already exists", FILE_ALREADY_EXISTS); } - // Note down RDB latest transaction sequence number, which is used - // as snapshot generation in the Differ. 
- final long dbLatestSequenceNumber = - ((RDBStore) omMetadataManager.getStore()).getDb() - .getLatestSequenceNumber(); - snapshotInfo.setDbTxSequenceNumber(dbLatestSequenceNumber); ByteString txnBytes = TransactionInfo.valueOf(context.getTermIndex()).toByteString(); snapshotInfo.setCreateTransactionInfo(txnBytes); snapshotInfo.setLastTransactionInfo(txnBytes); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 10e296a2c859..b1de439bb0be 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -181,11 +181,14 @@ public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInfo snapshotInfo) throws IOException { try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), - () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - getLiveSSTFilesForCFs(snapshotStore.getDb().getManagedRocksDb(), - COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT), - snapshotInfo.getPathPreviousSnapshotId(), null), - null))) { + () -> { + List lfms = getLiveSSTFilesForCFs(snapshotStore.getDb().getManagedRocksDb(), + COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT); + long dbTxnSeqNumber = lfms.stream().mapToLong(LiveFileMetaData::largestSeqno).max().orElse(0L); + OmSnapshotLocalData localData = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), + lfms, snapshotInfo.getPathPreviousSnapshotId(), null, dbTxnSeqNumber); + return Pair.of(localData, null); + })) { snapshotLocalData.commit(); } } @@ -263,16 +266,18 @@ private void addMissingSnapshotYamlFiles( // Create a yaml file for snapshots which are missing if (!snapshotLocalDataFile.exists()) { List sstList = Collections.emptyList(); + long dbTxnSeqNumber = 0L; if (snapshotInfo.getSnapshotStatus() == SNAPSHOT_ACTIVE) { try (OmMetadataManagerImpl snapshotMetadataManager = defaultSnapProvider.apply(snapshotInfo)) { ManagedRocksDB snapDB = ((RDBStore)snapshotMetadataManager.getStore()).getDb().getManagedRocksDb(); sstList = getLiveSSTFilesForCFs(snapDB, COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT); + dbTxnSeqNumber = sstList.stream().mapToLong(LiveFileMetaData::largestSeqno).max().orElse(0L); } catch (Exception e) { throw new IOException(e); } } OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapshotId, sstList, - snapshotInfo.getPathPreviousSnapshotId(), null); + snapshotInfo.getPathPreviousSnapshotId(), null, dbTxnSeqNumber); snapshotLocalDataSerializer.save(snapshotLocalDataFile, snapshotLocalData); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java index 0a59029fb0f4..d28b8fe0e1d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java @@ -101,7 +101,7 @@ public Optional>> computeDeltaFiles(SnapshotIn private static DifferSnapshotInfo getDSIFromSI(OMMetadataManager 
activeOmMetadataManager, SnapshotInfo snapshotInfo, OmSnapshotLocalData snapshotLocalData) throws IOException { final UUID snapshotId = snapshotInfo.getSnapshotId(); - final long dbTxSequenceNumber = snapshotInfo.getDbTxSequenceNumber(); + final long dbTxSequenceNumber = snapshotLocalData.getDbTxSequenceNumber(); NavigableMap> versionSstFiles = snapshotLocalData.getVersionSstFileInfos().entrySet() .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getSstFiles(), (u, v) -> { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 81f111e8464b..34b9fbe397ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -118,7 +118,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName, Trans createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, - previousSnapshotId, transactionInfo); + previousSnapshotId, transactionInfo, 10); // Set version dataYaml.setVersion(42); @@ -164,6 +164,7 @@ public void testWriteToYaml() throws IOException { // Verify fields assertEquals(44, snapshotData.getVersion()); + assertEquals(10, snapshotData.getDbTxSequenceNumber()); assertTrue(snapshotData.getSstFiltered()); assertEquals(transactionInfo, snapshotData.getTransactionInfo()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index fff404803649..ba5511f52c2b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -790,15 +790,17 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { expNotDefraggedSSTFileList.put(DIRECTORY_TABLE, Stream.of("dt1", "dt2").collect(Collectors.toList())); List mockedLiveFiles = new ArrayList<>(); + int seqNumber = 0; for (Map.Entry> entry : expNotDefraggedSSTFileList.entrySet()) { String cfname = entry.getKey(); for (String fname : entry.getValue()) { - mockedLiveFiles.add(createMockLiveFileMetaData("/" + fname + ".sst", cfname, "k1", "k2")); + mockedLiveFiles.add(createMockLiveFileMetaData("/" + fname + ".sst", cfname, "k1", "k2", seqNumber++)); } } + int expectedDbTxSequenceNumber = seqNumber - 1; // Add some other column families and files that should be ignored - mockedLiveFiles.add(createMockLiveFileMetaData("ot1.sst", "otherTable", "k1", "k2")); - mockedLiveFiles.add(createMockLiveFileMetaData("ot2.sst", "otherTable", "k1", "k2")); + mockedLiveFiles.add(createMockLiveFileMetaData("ot1.sst", "otherTable", "k1", "k2", seqNumber++)); + mockedLiveFiles.add(createMockLiveFileMetaData("ot2.sst", "otherTable", "k1", "k2", seqNumber)); mockSnapshotStore(snapshotId, mockedLiveFiles); localDataManager = getNewOmSnapshotLocalDataManager(); @@ -820,6 +822,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertEquals(0L, localData.getLastDefragTime()); 
assertTrue(localData.getNeedsDefrag()); assertEquals(1, localData.getVersionSstFileInfos().size()); + assertEquals(expectedDbTxSequenceNumber, localData.getDbTxSequenceNumber()); } @Test @@ -829,12 +832,12 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { // Setup snapshot store mock List sstFiles = new ArrayList<>(); - sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); - sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); - sstFiles.add(createMockLiveFileMetaData("file3.sst", FILE_TABLE, "key1", "key7")); - sstFiles.add(createMockLiveFileMetaData("file4.sst", FILE_TABLE, "key1", "key7")); - sstFiles.add(createMockLiveFileMetaData("file5.sst", DIRECTORY_TABLE, "key1", "key7")); - sstFiles.add(createMockLiveFileMetaData("file6.sst", "colFamily1", "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7", 10)); + sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9", 20)); + sstFiles.add(createMockLiveFileMetaData("file3.sst", FILE_TABLE, "key1", "key7", 30)); + sstFiles.add(createMockLiveFileMetaData("file4.sst", FILE_TABLE, "key1", "key7", 100)); + sstFiles.add(createMockLiveFileMetaData("file5.sst", DIRECTORY_TABLE, "key1", "key7", 5000)); + sstFiles.add(createMockLiveFileMetaData("file6.sst", "colFamily1", "key1", "key7", 6000)); List sstFileInfos = IntStream.range(0, sstFiles.size() - 1) .mapToObj(sstFiles::get).map(lfm -> new SstFileInfo(lfm.fileName().replace(".sst", ""), @@ -856,6 +859,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { assertEquals(expectedVersionMeta, versionMeta); // New Snapshot create needs to be defragged always. assertTrue(snapshotLocalData.needsDefrag()); + assertEquals(5000, snapshotLocalData.getSnapshotLocalData().getDbTxSequenceNumber()); } } @@ -1060,11 +1064,17 @@ private SnapshotInfo createMockSnapshotInfo(UUID snapshotId, UUID previousSnapsh private LiveFileMetaData createMockLiveFileMetaData(String fileName, String columnFamilyName, String smallestKey, String largestKey) { + return createMockLiveFileMetaData(fileName, columnFamilyName, smallestKey, largestKey, 0); + } + + private LiveFileMetaData createMockLiveFileMetaData(String fileName, String columnFamilyName, String smallestKey, + String largestKey, long largestSeqNumber) { LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); when(liveFileMetaData.columnFamilyName()).thenReturn(StringUtils.string2Bytes(columnFamilyName)); when(liveFileMetaData.fileName()).thenReturn(fileName); when(liveFileMetaData.smallestKey()).thenReturn(StringUtils.string2Bytes(smallestKey)); when(liveFileMetaData.largestKey()).thenReturn(StringUtils.string2Bytes(largestKey)); + when(liveFileMetaData.largestSeqno()).thenReturn(largestSeqNumber); return liveFileMetaData; } @@ -1074,7 +1084,7 @@ private OmSnapshotLocalData createMockLocalData(UUID snapshotId, UUID previousSn sstFiles.add(createMockLiveFileMetaData("file2.sst", "columnFamily1", "key3", "key10")); sstFiles.add(createMockLiveFileMetaData("file3.sst", "columnFamily2", "key1", "key8")); sstFiles.add(createMockLiveFileMetaData("file4.sst", "columnFamily2", "key0", "key10")); - return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId, null); + return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId, null, 10); } private void createSnapshotLocalDataFile(UUID snapshotId, UUID previousSnapshotId) From 1e0471124c67aefd352c6c0611d863da0ae36e65 Mon Sep 17 
00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 18 Nov 2025 07:30:57 -0500 Subject: [PATCH 119/126] HDDS-13929. Fix findbugs Change-Id: I6bedaf83ebfc65f46a7c3fd298fa7dc5f26d7fd1 --- .../ozone-manager/dev-support/findbugsExcludeFile.xml | 4 ++++ .../java/org/apache/hadoop/ozone/om/OmSnapshotManager.java | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml index 55abc2630178..739fd1f8b40d 100644 --- a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml @@ -16,4 +16,8 @@ limitations under the License. --> + + + + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index b79282b1aafa..c3b9feae77fe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -262,10 +262,6 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { } this.ozoneManager = ozoneManager; - RocksDBCheckpointDiffer differ = ozoneManager - .getMetadataManager() - .getStore() - .getRocksDBCheckpointDiffer(); // Soft-limit of lru cache size this.softCacheSize = ozoneManager.getConfiguration().getInt( From 6fc9a2cb3da96c799e43f71a57186f0780ae92b5 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 18 Nov 2025 07:43:01 -0500 Subject: [PATCH 120/126] HDDS-13949. Fix findbugs Change-Id: I63db69c634d96de05aaad865b467f1eee671440c --- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 13 ------------- .../diff/delta/TestCompositeDeltaDiffComputer.java | 3 +-- .../snapshot/diff/delta/TestRDBDifferComputer.java | 3 +-- 3 files changed, 2 insertions(+), 17 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 8fd922d210ec..6188b51d3528 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -71,9 +71,6 @@ public final class SnapshotInfo implements Auditable, CopyObject { private UUID pathPreviousSnapshotId; private UUID globalPreviousSnapshotId; private String snapshotPath; // snapshot mask - /** - * RocksDB's transaction sequence number at the time of checkpoint creation. - */ private boolean deepClean; private boolean sstFiltered; private long referencedSize; @@ -246,7 +243,6 @@ public static class Builder { private UUID pathPreviousSnapshotId; private UUID globalPreviousSnapshotId; private String snapshotPath; - private long dbTxSequenceNumber; private boolean deepClean; private boolean sstFiltered; private long referencedSize; @@ -324,12 +320,6 @@ public Builder setSnapshotPath(String snapshotPath) { return this; } - /** @param dbTxSequenceNumber - RDB latest transaction sequence number. */ - public Builder setDbTxSequenceNumber(long dbTxSequenceNumber) { - this.dbTxSequenceNumber = dbTxSequenceNumber; - return this; - } - /** @param deepClean - To be deep cleaned status for snapshot. 
*/ public Builder setDeepClean(boolean deepClean) { this.deepClean = deepClean; @@ -520,9 +510,6 @@ public static Builder builderFromProtobuf( osib.setCreateTransactionInfo(snapshotInfoProto.getCreateTransactionInfo()); } - osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) - .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); - return osib; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java index e8af3f84dd72..b64520a05c14 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestCompositeDeltaDiffComputer.java @@ -718,8 +718,7 @@ private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName .setVolumeName(volumeName) .setBucketName(bucketName) .setName(snapshotName) - .setSnapshotId(snapshotId) - .setDbTxSequenceNumber(100L); + .setSnapshotId(snapshotId); return builder.build(); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java index b4ba058a43c2..8476c6df8bc1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java @@ -486,8 +486,7 @@ private SnapshotInfo createMockSnapshotInfo(String volumeName, String bucketName .setVolumeName(volumeName) .setBucketName(bucketName) .setName(snapshotName) - .setSnapshotId(snapshotId) - .setDbTxSequenceNumber(100L); + .setSnapshotId(snapshotId); return builder.build(); } From 1201350db0893a119233a781f7c8dcb04911e2ba Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 19 Nov 2025 00:45:53 -0500 Subject: [PATCH 121/126] HDDS-13929. Fix javadoc Change-Id: Ie464f0ed154e8911024dbe7802338e11a83f502a --- .../src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java index f4f2d741ecec..06870b2d8de3 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java @@ -69,7 +69,6 @@ public static Map filterRelevantSstFiles(Map /** * Filter sst files based on prefixes. The set of sst files to be filtered would be mutated. - * @param Type of the key in the map. * @param filesToBeFiltered sst files to be filtered. * @param tablesToLookup Set of column families to be included in the diff. * @param tablePrefixInfo TablePrefixInfo to filter irrelevant SST files. From edf0213618493940fa0bcaf24ba9905254729d86 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 19 Nov 2025 08:41:26 -0500 Subject: [PATCH 122/126] HDDS-13949. 
Add back snapshotPath init removed by mistake

Change-Id: I09301ff17cf3c8abab5785d25336c7633f8ac293
---
 .../java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index 6188b51d3528..0e2a4b6ee796 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -509,6 +509,7 @@ public static Builder builderFromProtobuf(
     if (snapshotInfoProto.hasCreateTransactionInfo()) {
       osib.setCreateTransactionInfo(snapshotInfoProto.getCreateTransactionInfo());
     }
+    osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath());
 
     return osib;
   }

From 42ca3d739b65ba285f1120edf984e694c29a8dbd Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Wed, 19 Nov 2025 12:26:16 -0500
Subject: [PATCH 123/126] HDDS-13929. Fix comment

Change-Id: I6ae5c5897f9ca8580ba68b17ad77dffa35dfafb6
---
 .../diff/delta/FileLinkDeltaFileComputer.java     | 15 ++++++++-------
 .../om/snapshot/diff/delta/FullDiffComputer.java  |  2 ++
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java
index ff4bac52dba7..a6860574339e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FileLinkDeltaFileComputer.java
@@ -49,7 +49,8 @@
 /**
  * The {@code FileLinkDeltaFileComputer} is an abstract class that provides a
  * base implementation for the {@code DeltaFileComputer} interface. It is
- * responsible for computing delta files by creating hard links to the
+ * responsible for computing delta files (the set of files which, if read completely, would yield
+ * all the key changes between two snapshots). It creates hard links to the
  * relevant source files in a specified delta directory, enabling a compact
  * representation of changes between snapshots.
  *
@@ -63,16 +64,16 @@ public abstract class FileLinkDeltaFileComputer implements DeltaFileComputer {
   private final OmSnapshotManager omSnapshotManager;
   private final OMMetadataManager activeMetadataManager;
   private final Consumer activityReporter;
-  private final Path deltaDir;
+  private final Path tmpDeltaFileLinkDir;
   private final AtomicInteger linkFileCounter = new AtomicInteger(0);
 
   FileLinkDeltaFileComputer(OmSnapshotManager snapshotManager, OMMetadataManager activeMetadataManager,
       Path deltaDirPath, Consumer activityReporter) throws IOException {
-    this.deltaDir = deltaDirPath.toAbsolutePath();
+    this.tmpDeltaFileLinkDir = deltaDirPath.toAbsolutePath();
     this.omSnapshotManager = snapshotManager;
     this.activityReporter = activityReporter;
     this.activeMetadataManager = activeMetadataManager;
-    createDirectories(deltaDir);
+    createDirectories(tmpDeltaFileLinkDir);
   }
 
   /**
@@ -119,7 +120,7 @@ Path createLink(Path path) throws IOException {
     String extension = getExtension(fileName.toString());
    extension = StringUtils.isBlank(extension) ? "" : ("." + extension);
     do {
-      link = deltaDir.resolve(linkFileCounter.incrementAndGet() + extension);
+      link = tmpDeltaFileLinkDir.resolve(linkFileCounter.incrementAndGet() + extension);
       try {
         Files.createLink(link, source);
         createdLink = true;
@@ -147,9 +148,9 @@ OMMetadataManager getActiveMetadataManager() {
 
   @Override
   public void close() throws IOException {
-    if (deltaDir == null || Files.notExists(deltaDir)) {
+    if (tmpDeltaFileLinkDir == null || Files.notExists(tmpDeltaFileLinkDir)) {
       return;
     }
-    deleteDirectory(deltaDir);
+    deleteDirectory(tmpDeltaFileLinkDir);
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java
index e3c6c0dcae46..6beb5f7dc9b9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/FullDiffComputer.java
@@ -46,6 +46,8 @@
  * in files and generates corresponding links for easier processing of snapshot diffs.
  * This implementation handles cases of optimized inode-based comparisons as well as
  * fallback with full file list comparisons in case of exceptions.
+ * The delta files are all files present in the source snapshot but absent from the target snapshot,
+ * and vice versa.
  */
 class FullDiffComputer extends FileLinkDeltaFileComputer {

From faa1380b194356fd07c287cc2dcc0dc38c47f964 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Wed, 19 Nov 2025 12:31:00 -0500
Subject: [PATCH 124/126] HDDS-13929. Change error log to warn

Change-Id: Ib6089c9c7d8c787edd75721234d94d1ea0eb88c9
---
 .../om/snapshot/diff/delta/CompositeDeltaDiffComputer.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java
index 5c109bd0f996..4ef17d841141 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/CompositeDeltaDiffComputer.java
@@ -88,7 +88,7 @@ Optional>> computeDeltaFiles(SnapshotInfo from
             tablePrefixInfo).orElse(null);
       }
     } catch (Exception e) {
-      LOG.error("Falling back to full diff.", e);
+      LOG.warn("Falling back to full diff.", e);
     }
     if (deltaFiles == null) {
       updateActivity(SnapshotDiffResponse.SubStatus.SST_FILE_DELTA_FULL_DIFF);

From 4a36e9865f74de8921335af382fa8f94b8306f30 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Wed, 19 Nov 2025 21:54:23 -0500
Subject: [PATCH 125/126] HDDS-13949.
Address review comments from HDDS-13929 Change-Id: I227cf7471e5235204926fb5f48c754516222fc30 --- .../apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java | 4 +--- .../ozone/om/snapshot/diff/delta/RDBDifferComputer.java | 6 +++--- .../om/snapshot/diff/delta/TestRDBDifferComputer.java | 7 +------ 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java index e11abc3fdb16..7c4b878d62be 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java @@ -771,9 +771,7 @@ private Path getSSTFullPath(SstFileInfo sstFileInfo, Path... dbPaths) throws IOE * @param dest destination snapshot * @param versionMap version map containing the connection between source snapshot version and dest snapshot version. * @param tablesToLookup tablesToLookup set of table (column family) names used to restrict which SST files to return. - * @return A list of SST files without extension. - * e.g. ["/path/to/sstBackupDir/000050.sst", - * "/path/to/sstBackupDir/000060.sst"] + * @return map of SST file absolute paths with extension to SstFileInfo. */ public synchronized Optional> getSSTDiffListWithFullPath(DifferSnapshotInfo src, DifferSnapshotInfo dest, Map versionMap, TablePrefixInfo prefixInfo, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java index d28b8fe0e1d1..7ab7a5a68d75 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/RDBDifferComputer.java @@ -73,9 +73,9 @@ public Optional>> computeDeltaFiles(SnapshotIn if (differ != null) { try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider snapProvider = getLocalDataProvider(toSnapshot.getSnapshotId(), fromSnapshot.getSnapshotId())) { - final DifferSnapshotInfo fromDSI = getDSIFromSI(getActiveMetadataManager(), fromSnapshot, + final DifferSnapshotInfo fromDSI = toDifferSnapshotInfo(getActiveMetadataManager(), fromSnapshot, snapProvider.getPreviousSnapshotLocalData()); - final DifferSnapshotInfo toDSI = getDSIFromSI(getActiveMetadataManager(), toSnapshot, + final DifferSnapshotInfo toDSI = toDifferSnapshotInfo(getActiveMetadataManager(), toSnapshot, snapProvider.getSnapshotLocalData()); final Map versionMap = snapProvider.getSnapshotLocalData().getVersionSstFileInfos().entrySet() .stream().collect(toMap(Map.Entry::getKey, entry -> entry.getValue().getPreviousSnapshotVersion())); @@ -98,7 +98,7 @@ public Optional>> computeDeltaFiles(SnapshotIn /** * Convert from SnapshotInfo to DifferSnapshotInfo. 
*/ - private static DifferSnapshotInfo getDSIFromSI(OMMetadataManager activeOmMetadataManager, + private static DifferSnapshotInfo toDifferSnapshotInfo(OMMetadataManager activeOmMetadataManager, SnapshotInfo snapshotInfo, OmSnapshotLocalData snapshotLocalData) throws IOException { final UUID snapshotId = snapshotInfo.getSnapshotId(); final long dbTxSequenceNumber = snapshotLocalData.getDbTxSequenceNumber(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java index 8476c6df8bc1..83facf633063 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java @@ -363,7 +363,7 @@ public void testComputeDeltaFilesWithVersionMapping() throws IOException { * Tests that getDSIFromSI throws exception when no versions found. */ @Test - public void testGetDSIFromSIWithNoVersions() throws IOException { + public void testToDifferSnapshotInfoWithNoVersions() throws IOException { rdbDifferComputer = new RDBDifferComputer(omSnapshotManager, activeMetadataManager, deltaDirPath, activityReporter); @@ -527,8 +527,3 @@ private OmSnapshotLocalData createMockSnapshotLocalDataWithVersions(UUID snapsho return localData; } } - - - - - From 668ca9d80f63231b930aeeabaa07dafc1135d099 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 20 Nov 2025 12:03:59 -0500 Subject: [PATCH 126/126] HDDS-13949. Address review comments Change-Id: If0400fa3ac799e8487888dd1149914fe093a5e6f --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 2 +- .../ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index b3e9452642ea..f876a9606017 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -67,7 +67,7 @@ public class OmSnapshotLocalData implements WithChecksum { // Stores the transactionInfo corresponding to OM when the snaphot is purged. private TransactionInfo transactionInfo; - // Stores the rocksDB's transaction sequence number at the time of snapshot creation.' + // Stores the rocksDB's transaction sequence number at the time of snapshot creation. 
  private long dbTxSequenceNumber;
 
   // Map of version to VersionMeta, using linkedHashMap since the order of the map needs to be deterministic for
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java
index 83facf633063..19579a59e16e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/diff/delta/TestRDBDifferComputer.java
@@ -360,7 +360,7 @@ public void testComputeDeltaFilesWithVersionMapping() throws IOException {
   }
 
   /**
-   * Tests that getDSIFromSI throws exception when no versions found.
+   * Tests that toDifferSnapshotInfo throws an exception when no versions are found.
    */
   @Test
   public void testToDifferSnapshotInfoWithNoVersions() throws IOException {
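
Net effect of the series: the RocksDB sequence number is no longer captured on SnapshotInfo at
snapshot-create time (the proto field is deprecated); instead each snapshot's local-data file
records a dbTxSequenceNumber derived from the snapshot's own tracked live SST files. Below is a
minimal standalone sketch of that derivation, assuming only rocksdbjni on the classpath; the
class and method names here are illustrative, not project code (the real logic lives inline in
OmSnapshotLocalDataManager), while LiveFileMetaData.largestSeqno() is the actual RocksJava
accessor the patches use:

    import java.util.List;
    import org.rocksdb.LiveFileMetaData;

    // Illustrative sketch only; names are hypothetical.
    final class DbTxSeqNumberSketch {
      private DbTxSeqNumberSketch() { }

      // Highest RocksDB sequence number present in any of the snapshot's tracked
      // live SST files; falls back to 0 when the snapshot has no tracked files.
      static long deriveDbTxSequenceNumber(List<LiveFileMetaData> trackedLiveSstFiles) {
        return trackedLiveSstFiles.stream()
            .mapToLong(LiveFileMetaData::largestSeqno)
            .max()
            .orElse(0L);
      }
    }

This matches the expectations in the tests above: files from untracked column families are
filtered out before the maximum is taken, so a snapshot whose tracked files carry largest
sequence numbers 10, 20, 30, 100 and 5000 records 5000 even when an untracked file has a
larger value.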