From 16014eaff12ab3455eeeac937549dc2d1eb1a49a Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Mon, 15 Jul 2024 12:48:47 -0700
Subject: [PATCH] HDDS-11183. Keys from DeletedTable and DeletedDirTable of AOS
 should be deleted on batch operation while creating a snapshot

Change-Id: I18b6cba6302cc4690218269e4770ffd62c1a5971
---
 .../hadoop/ozone/om/OmSnapshotManager.java     | 15 ++++++++-------
 .../snapshot/OMSnapshotCreateResponse.java     |  2 +-
 .../hadoop/ozone/om/TestOmSnapshotManager.java | 17 +++++++++++++----
 3 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index de9219db40f2..636b2594e0c4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.CodecRegistry;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
@@ -447,7 +448,7 @@ public void invalidateCacheEntry(UUID key) {
    * @return instance of DBCheckpoint
    */
   public static DBCheckpoint createOmSnapshotCheckpoint(
-      OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo)
+      OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo, BatchOperation batchOperation)
       throws IOException {
     RDBStore store = (RDBStore) omMetadataManager.getStore();
 
@@ -468,10 +469,10 @@ public static DBCheckpoint createOmSnapshotCheckpoint(
     // Clean up active DB's deletedTable right after checkpoint is taken,
     // There is no need to take any lock as of now, because transactions are flushed sequentially.
     deleteKeysFromDelKeyTableInSnapshotScope(omMetadataManager,
-        snapshotInfo.getVolumeName(), snapshotInfo.getBucketName());
+        snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation);
     // Clean up deletedDirectoryTable as well
     deleteKeysFromDelDirTableInSnapshotScope(omMetadataManager,
-        snapshotInfo.getVolumeName(), snapshotInfo.getBucketName());
+        snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), batchOperation);
 
     if (dbCheckpoint != null && snapshotDirExist) {
       LOG.info("Checkpoint : {} for snapshot {} already exists.",
@@ -494,7 +495,7 @@ public static DBCheckpoint createOmSnapshotCheckpoint(
    */
   private static void deleteKeysFromDelDirTableInSnapshotScope(
       OMMetadataManager omMetadataManager, String volumeName,
-      String bucketName) throws IOException {
+      String bucketName, BatchOperation batchOperation) throws IOException {
 
     // Range delete start key (inclusive)
     final String keyPrefix = getOzonePathKeyForFso(omMetadataManager,
@@ -507,7 +508,7 @@ private static void deleteKeysFromDelDirTableInSnapshotScope(
           if (LOG.isDebugEnabled()) {
             LOG.debug("Removing key {} from DeletedDirTable", entry.getKey());
           }
-          omMetadataManager.getDeletedDirTable().delete(entry.getKey());
+          omMetadataManager.getDeletedDirTable().deleteWithBatch(batchOperation, entry.getKey());
           return null;
         });
   }
@@ -580,7 +581,7 @@ private static void deleteRangeInclusive(
    */
   private static void deleteKeysFromDelKeyTableInSnapshotScope(
       OMMetadataManager omMetadataManager, String volumeName,
-      String bucketName) throws IOException {
+      String bucketName, BatchOperation batchOperation) throws IOException {
 
     // Range delete start key (inclusive)
     final String keyPrefix =
@@ -593,7 +594,7 @@ private static void deleteKeysFromDelKeyTableInSnapshotScope(
           if (LOG.isDebugEnabled()) {
             LOG.debug("Removing key {} from DeletedTable", entry.getKey());
           }
-          omMetadataManager.getDeletedTable().delete(entry.getKey());
+          omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation, entry.getKey());
           return null;
         });
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
index 5b3db25114d9..b917296d826d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotCreateResponse.java
@@ -79,7 +79,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
 
     // Create the snapshot checkpoint. Also cleans up some tables.
     OmSnapshotManager.createOmSnapshotCheckpoint(omMetadataManager,
-        snapshotInfo);
+        snapshotInfo, batchOperation);
 
     // TODO: [SNAPSHOT] Move to createOmSnapshotCheckpoint and add table lock
     // Remove all entries from snapshotRenamedTable
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index c865cb7814de..ebc6bc6cb6c4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
 import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
@@ -176,9 +177,11 @@ public void testCloseOnEviction() throws IOException {
     ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(first);
     ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(second);
 
+    RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
     // create the first snapshot checkpoint
     OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(),
-        first);
+        first, rdbBatchOperation);
+    om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation);
 
     // retrieve it and setup store mock
     OmSnapshotManager omSnapshotManager = om.getOmSnapshotManager();
@@ -190,8 +193,10 @@ public void testCloseOnEviction() throws IOException {
         firstSnapshot.getMetadataManager(), "store", firstSnapshotStore);
 
     // create second snapshot checkpoint (which will be used for eviction)
+    rdbBatchOperation = new RDBBatchOperation();
     OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(),
-        second);
+        second, rdbBatchOperation);
+    om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation);
 
     // confirm store not yet closed
     verify(firstSnapshotStore, times(0)).close();
@@ -634,15 +639,19 @@ public void testCreateSnapshotIdempotent() throws Exception {
     when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first);
 
     // Create first checkpoint for the snapshot checkpoint
+    RDBBatchOperation rdbBatchOperation = new RDBBatchOperation();
     OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(),
-        first);
+        first, rdbBatchOperation);
+    om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation);
 
     assertThat(logCapturer.getOutput()).doesNotContain(
         "for snapshot " + first.getName() + " already exists.");
     logCapturer.clearOutput();
     // Create checkpoint again for the same snapshot.
+    rdbBatchOperation = new RDBBatchOperation();
     OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(),
-        first);
+        first, rdbBatchOperation);
+    om.getMetadataManager().getStore().commitBatchOperation(rdbBatchOperation);
 
     assertThat(logCapturer.getOutput()).contains(
         "for snapshot " + first.getName() + " already exists.");
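
A minimal sketch (not part of the diff above) of the calling convention this patch introduces, modeled on the updated TestOmSnapshotManager cases: the caller now supplies the BatchOperation that createOmSnapshotCheckpoint stages its DeletedTable/DeletedDirTable cleanup on, and the keys only leave the active object store (AOS) DB when that batch is committed. The class name SnapshotCheckpointSketch and method checkpointWithBatch are hypothetical illustration names; RDBBatchOperation, OmSnapshotManager.createOmSnapshotCheckpoint and DBStore#commitBatchOperation are the APIs the patch actually touches.

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;

/** Hypothetical helper illustrating the batch-based checkpoint flow. */
public final class SnapshotCheckpointSketch {

  private SnapshotCheckpointSketch() {
  }

  /**
   * Takes a snapshot checkpoint and applies the staged DeletedTable /
   * DeletedDirTable cleanup with a single batch commit, mirroring the
   * updated TestOmSnapshotManager cases.
   */
  public static void checkpointWithBatch(OMMetadataManager omMetadataManager,
      SnapshotInfo snapshotInfo) throws IOException {
    // The caller owns the batch: the tests build an RDBBatchOperation directly,
    // while OMSnapshotCreateResponse reuses the batch handed to addToDBBatch.
    try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) {
      // Deletes from the active DB's deletedTable/deletedDirTable in the
      // snapshot's volume/bucket scope are only staged on the batch here.
      OmSnapshotManager.createOmSnapshotCheckpoint(
          omMetadataManager, snapshotInfo, rdbBatchOperation);

      // Nothing is removed from the active DB until the batch commits.
      omMetadataManager.getStore().commitBatchOperation(rdbBatchOperation);
    }
  }
}

In OMSnapshotCreateResponse itself no new batch is created or committed: the existing batchOperation passed into addToDBBatch is reused, so the table cleanup lands in the same batch the OM double buffer flushes for the snapshot-create response rather than being applied to the active DB one key at a time.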