diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java new file mode 100644 index 000000000000..0d565cbf3f7a --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +/** + * + * Represents a function that accepts one argument and produces a result. + * This is a functional interface whose functional method is apply(Object). + * Type parameters: + * – the type of the input to the function + * – the type of the result of the function + * - the type of exception thrown. + */ +public interface CheckedFunction { + R apply(T t) throws E; +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 4af8022bd1db..7916edadf321 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -41,6 +41,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; @@ -53,6 +54,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import jakarta.annotation.Nonnull; import java.io.File; @@ -69,6 +71,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; @@ -104,6 +107,7 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -112,6 +116,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import 
org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -140,6 +145,7 @@ import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; @@ -1585,6 +1591,144 @@ void testGetNotExistedPart() throws IOException { assertEquals(0, locationList.size()); } + private Table getMockedTable(Map map) throws IOException { + Table table = mock(Table.class); + when(table.get(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + when(table.getIfExist(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + return table; + } + + private OmKeyInfo getMockedOmKeyInfo(OmBucketInfo bucketInfo, long parentId, String key, long objectId) { + OmKeyInfo omKeyInfo = mock(OmKeyInfo.class); + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + when(omKeyInfo.getFileName()).thenReturn(key); + } else { + when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + when(omKeyInfo.getKeyName()).thenReturn(key); + } + when(omKeyInfo.getObjectID()).thenReturn(objectId); + return omKeyInfo; + } + + private OmDirectoryInfo getMockedOmDirInfo(long parentId, String key, long objectId) { + OmDirectoryInfo omKeyInfo = mock(OmDirectoryInfo.class); + when(omKeyInfo.getName()).thenReturn(key); + when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + when(omKeyInfo.getParentObjectID()).thenReturn(0L); + when(omKeyInfo.getObjectID()).thenReturn(objectId); + return omKeyInfo; + } + + private String getPath(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyInfo) { + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + return volumeId + "/" + bucketInfo.getObjectID() + "/" + omKeyInfo.getParentObjectID() + "/" + + omKeyInfo.getFileName(); + } else { + return bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName() + "/" + omKeyInfo.getKeyName(); + } + } + + private String getPath(long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo omDirInfo) { + return volumeId + "/" + bucketInfo.getObjectID() + "/" + omDirInfo.getParentObjectID() + "/" + + omDirInfo.getName(); + } + + private String getRenameKey(String volume, String bucket, long objectId) { + return volume + "/" + bucket + "/" + objectId; + } + + @ParameterizedTest + @EnumSource(value = BucketLayout.class) + public void testPreviousSnapshotOzoneKeyInfo(BucketLayout bucketLayout) throws IOException { + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + if (bucketLayout.isFileSystemOptimized()) { + when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + } else { + when(omMetadataManager.getOzoneKey(anyString(), anyString(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + } + when(omMetadataManager.getRenameKey(anyString(), anyString(), anyLong())).thenAnswer( + i -> getRenameKey(i.getArgument(0), i.getArgument(1), i.getArgument(2))); + + OMMetadataManager previousMetadataManager = mock(OMMetadataManager.class); + OzoneConfiguration configuration = new OzoneConfiguration(); + KeyManagerImpl km = new 
KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + KeyManagerImpl prevKM = new KeyManagerImpl(null, null, previousMetadataManager, configuration, null, null, null); + long volumeId = 1L; + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setBucketName(BUCKET_NAME).setVolumeName(VOLUME_NAME) + .setObjectID(2L).setBucketLayout(bucketLayout).build(); + OmKeyInfo prevKey = getMockedOmKeyInfo(bucketInfo, 5, "key", 1); + OmKeyInfo prevKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); + OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmKeyInfo currentKey3 = getMockedOmKeyInfo(bucketInfo, 8, "key3", 3); + OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4); + Table prevKeyTable = + getMockedTable(ImmutableMap.of( + getPath(volumeId, bucketInfo, prevKey), prevKey, + getPath(volumeId, bucketInfo, prevKey2), prevKey2)); + Table renameTable = getMockedTable( + ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), getPath(volumeId, bucketInfo, prevKey), + getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getPath(volumeId, bucketInfo, + getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); + when(previousMetadataManager.getKeyTable(eq(bucketLayout))).thenReturn(prevKeyTable); + when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable); + assertEquals(prevKey, km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey4).apply(prevKM)); + } + + @Test + public void testPreviousSnapshotOzoneDirInfo() throws IOException { + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + when(omMetadataManager.getRenameKey(anyString(), anyString(), anyLong())).thenAnswer( + i -> getRenameKey(i.getArgument(0), i.getArgument(1), i.getArgument(2))); + + OMMetadataManager previousMetadataManager = mock(OMMetadataManager.class); + OzoneConfiguration configuration = new OzoneConfiguration(); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + KeyManagerImpl prevKM = new KeyManagerImpl(null, null, previousMetadataManager, configuration, null, null, null); + long volumeId = 1L; + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setBucketName(BUCKET_NAME).setVolumeName(VOLUME_NAME) + .setObjectID(2L).setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build(); + OmDirectoryInfo prevKey = getMockedOmDirInfo(5, "key", 1); + OmDirectoryInfo prevKey2 = getMockedOmDirInfo(7, "key2", 2); + OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); + OmDirectoryInfo currentKeyDir = getMockedOmDirInfo(6, "renamedKey", 1); + OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmDirectoryInfo currentKeyDir2 = getMockedOmDirInfo(7, "key2", 2); + OmKeyInfo currentKey3 = getMockedOmKeyInfo(bucketInfo, 8, "key3", 3); + OmDirectoryInfo currentKeyDir3 = getMockedOmDirInfo(8, "key3", 3); + OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4); + 
OmDirectoryInfo currentKeyDir4 = getMockedOmDirInfo(8, "key4", 4); + Table prevDirTable = + getMockedTable(ImmutableMap.of( + getPath(volumeId, bucketInfo, prevKey), prevKey, + getPath(volumeId, bucketInfo, prevKey2), prevKey2)); + Table renameTable = getMockedTable( + ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), getPath(volumeId, bucketInfo, prevKey), + getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getPath(volumeId, bucketInfo, + getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); + when(previousMetadataManager.getDirectoryTable()).thenReturn(prevDirTable); + when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable); + assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey4).apply(prevKM)); + + assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir4).apply(prevKM)); + } + private void initKeyTableForMultipartTest(String keyName, String volume) throws IOException { List locationInfoGroups = new ArrayList<>(); List locationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 5a8942fe6b24..ec9e34cec720 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -613,6 +613,11 @@ default String getOpenFileName(long volumeId, long bucketId, long parentObjectId */ String getRenameKey(String volume, String bucket, long objectID); + /** + * Given renameKey, return the volume, bucket and objectID from the key. + */ + String[] splitRenameKey(String renameKey); + /** * Returns the DB key name of a multipart upload key in OM metadata store * for FSO-enabled buckets. 
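A minimal sketch of the new CheckedFunction contract and of how this patch uses it, for readers of this flattened patch text: the angle-bracket generics have been stripped above (the interface appears as "public interface CheckedFunction { R apply(T t) throws E; }"), so the type parameters below are inferred from the visible apply(T t) throws E signature and from the way KeyManagerImpl returns these functions later in the patch. The exception bound and the example rename key are assumptions, not verbatim patch content.

package org.apache.hadoop.ozone.util;

/**
 * Represents a function that accepts one argument and produces a result,
 * and is allowed to throw a checked exception.
 *
 * @param <T> the type of the input to the function
 * @param <R> the type of the result of the function
 * @param <E> the type of exception thrown
 */
public interface CheckedFunction<T, R, E extends Exception> { // bound assumed; not shown in the patch text
  R apply(T t) throws E;
}

// Illustrative use, mirroring the test assertions in TestKeyManagerImpl above:
// the returned function is handed the previous snapshot's KeyManager and resolves
// the key either at its renamed path (taken from the snapshotRenamedTable) or at
// its current path.
//   CheckedFunction<KeyManager, OmKeyInfo, IOException> lookup =
//       km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, keyInfo);
//   OmKeyInfo prevKeyInfo = lookup.apply(prevKM);
//
// splitRenameKey(String) is the inverse of getRenameKey(volume, bucket, objectID);
// given the split on OM_KEY_PREFIX that skips element 0 (see OmMetadataManagerImpl
// below), a rename key of the assumed form "/vol1/buck1/12345" would yield
// {"vol1", "buck1", "12345"}.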
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index a0045e175e31..c2a9aa5eaddc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; @@ -41,6 +43,7 @@ import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; +import org.apache.hadoop.ozone.util.CheckedFunction; /** * Handles key level commands. @@ -84,7 +87,6 @@ OmKeyInfo lookupKey(OmKeyArgs args, ResolvedBucket bucketLayout, OmKeyInfo getKeyInfo(OmKeyArgs args, ResolvedBucket buctket, String clientAddress) throws IOException; - /** * Returns a list of keys represented by {@link OmKeyInfo} * in the given bucket. @@ -135,6 +137,24 @@ List> getRenamesKeyEntries( String volume, String bucket, String startKey, int size) throws IOException; + /** + * Returns the previous snapshot's ozone keyInfo corresponding for the object. + */ + CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo directoryInfo) throws IOException; + + /** + * Returns the previous snapshot's ozone keyInfo corresponding for the object. + */ + CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo directoryInfo) throws IOException; + + /** + * Returns the previous snapshot's ozone keyInfo corresponding for the object. + */ + CheckedFunction getPreviousSnapshotOzoneKeyInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException; + /** * Returns a list deleted entries from the deletedTable. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index ec448dabf239..a1f8fe99d8a9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -171,6 +171,7 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; +import org.apache.hadoop.ozone.util.CheckedFunction; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; @@ -774,6 +775,45 @@ public List> getRenamesKeyEntries( } } + @Override + public CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo keyInfo) throws IOException { + String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), keyInfo.getName()); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (km) -> km.getMetadataManager().getDirectoryTable()); + } + + @Override + public CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException { + String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), keyInfo.getFileName()); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (previousSnapshotKM) -> previousSnapshotKM.getMetadataManager().getDirectoryTable()); + } + + @Override + public CheckedFunction getPreviousSnapshotOzoneKeyInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException { + String currentKeyPath = bucketInfo.getBucketLayout().isFileSystemOptimized() + ? metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), keyInfo.getParentObjectID(), + keyInfo.getFileName()) : metadataManager.getOzoneKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), + keyInfo.getKeyName()); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (previousSnapshotKM) -> previousSnapshotKM.getMetadataManager().getKeyTable(bucketInfo.getBucketLayout())); + } + + + private CheckedFunction getPreviousSnapshotOzonePathInfo( + OmBucketInfo bucketInfo, long objectId, String currentKeyPath, + Function> table) throws IOException { + String renameKey = metadataManager.getRenameKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), objectId); + String renamedKey = metadataManager.getSnapshotRenamedTable().getIfExist(renameKey); + return (previousSnapshotKM) -> table.apply(previousSnapshotKM).get( + renamedKey != null ? 
renamedKey : currentKeyPath); + } + @Override public List>> getDeletedKeyEntries( String volume, String bucket, String startKey, int size) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 2053049573cc..fa70d55b2491 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -33,8 +33,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.service.SnapshotDeletingService.isBlockLocationInfoSame; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotDirExist; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; @@ -1911,6 +1911,12 @@ public String getRenameKey(String volumeName, String bucketName, return renameKey.toString(); } + @Override + public String[] splitRenameKey(String renameKey) { + String[] splitVals = renameKey.split(OM_KEY_PREFIX); + return new String[]{splitVals[1], splitVals[2], splitVals[3]}; + } + @Override public String getMultipartKey(long volumeId, long bucketId, long parentID, String fileName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index ccf77eb44ee9..0d36da711703 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.service.SnapshotDeletingService.isBlockLocationInfoSame; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index 8b28416f4e8b..5c2b16a604b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -52,8 +52,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import 
org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; @@ -330,60 +328,6 @@ boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) throws IOException { !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo); } - // TODO: Move this util class. - public static boolean isBlockLocationInfoSame(OmKeyInfo prevKeyInfo, - OmKeyInfo deletedKeyInfo) { - - if (prevKeyInfo == null && deletedKeyInfo == null) { - LOG.debug("Both prevKeyInfo and deletedKeyInfo are null."); - return true; - } - if (prevKeyInfo == null || deletedKeyInfo == null) { - LOG.debug("prevKeyInfo: '{}' or deletedKeyInfo: '{}' is null.", - prevKeyInfo, deletedKeyInfo); - return false; - } - // For hsync, Though the blockLocationInfo of a key may not be same - // at the time of snapshot and key deletion as blocks can be appended. - // If the objectId is same then the key is same. - if (prevKeyInfo.isHsync() && deletedKeyInfo.isHsync()) { - return true; - } - - if (prevKeyInfo.getKeyLocationVersions().size() != - deletedKeyInfo.getKeyLocationVersions().size()) { - return false; - } - - OmKeyLocationInfoGroup deletedOmKeyLocation = - deletedKeyInfo.getLatestVersionLocations(); - OmKeyLocationInfoGroup prevOmKeyLocation = - prevKeyInfo.getLatestVersionLocations(); - - if (deletedOmKeyLocation == null || prevOmKeyLocation == null) { - return false; - } - - List deletedLocationList = - deletedOmKeyLocation.getLocationList(); - List prevLocationList = - prevOmKeyLocation.getLocationList(); - - if (deletedLocationList.size() != prevLocationList.size()) { - return false; - } - - for (int idx = 0; idx < deletedLocationList.size(); idx++) { - OmKeyLocationInfo deletedLocationInfo = deletedLocationList.get(idx); - OmKeyLocationInfo prevLocationInfo = prevLocationList.get(idx); - if (!deletedLocationInfo.hasSameBlockAs(prevLocationInfo)) { - return false; - } - } - - return true; - } - @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index fcb29baefd9e..da582cb4ea00 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -116,7 +116,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; -import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.ListSnapshotDiffJobResponse; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; @@ -1459,8 +1458,7 @@ long generateDiffReport( private boolean isKeyModified(OmKeyInfo fromKey, OmKeyInfo toKey) { return !fromKey.isKeyInfoSame(toKey, false, false, false, false, true) - || !SnapshotDeletingService.isBlockLocationInfoSame( - fromKey, toKey); + || !SnapshotUtils.isBlockLocationInfoSame(fromKey, toKey); } private boolean isObjectModified(String fromObjectName, String toObjectName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 3b1b54751192..82513890ce23 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -40,6 +40,8 @@ import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; @@ -53,8 +55,7 @@ * Util class for snapshot diff APIs. */ public final class SnapshotUtils { - private static final Logger LOG = - LoggerFactory.getLogger(SnapshotUtils.class); + private static final Logger LOG = LoggerFactory.getLogger(SnapshotUtils.class); private SnapshotUtils() { throw new IllegalStateException("SnapshotUtils should not be initialized."); @@ -189,7 +190,7 @@ public static SnapshotInfo getPreviousSnapshot(OzoneManager ozoneManager, /** * Get the previous snapshot in the snapshot chain. */ - private static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) + public static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) throws IOException { // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot @@ -299,4 +300,55 @@ public static void validatePreviousSnapshotId(SnapshotInfo snapshotInfo, OMException.ResultCodes.INVALID_REQUEST); } } + + public static boolean isBlockLocationInfoSame(OmKeyInfo prevKeyInfo, + OmKeyInfo deletedKeyInfo) { + if (prevKeyInfo == null && deletedKeyInfo == null) { + LOG.debug("Both prevKeyInfo and deletedKeyInfo are null."); + return true; + } + if (prevKeyInfo == null || deletedKeyInfo == null) { + LOG.debug("prevKeyInfo: '{}' or deletedKeyInfo: '{}' is null.", + prevKeyInfo, deletedKeyInfo); + return false; + } + // For hsync, Though the blockLocationInfo of a key may not be same + // at the time of snapshot and key deletion as blocks can be appended. + // If the objectId is same then the key is same. 
+ if (prevKeyInfo.isHsync() && deletedKeyInfo.isHsync()) { + return true; + } + + if (prevKeyInfo.getKeyLocationVersions().size() != + deletedKeyInfo.getKeyLocationVersions().size()) { + return false; + } + + OmKeyLocationInfoGroup deletedOmKeyLocation = + deletedKeyInfo.getLatestVersionLocations(); + OmKeyLocationInfoGroup prevOmKeyLocation = + prevKeyInfo.getLatestVersionLocations(); + + if (deletedOmKeyLocation == null || prevOmKeyLocation == null) { + return false; + } + + List deletedLocationList = + deletedOmKeyLocation.getLocationList(); + List prevLocationList = + prevOmKeyLocation.getLocationList(); + + if (deletedLocationList.size() != prevLocationList.size()) { + return false; + } + + for (int idx = 0; idx < deletedLocationList.size(); idx++) { + OmKeyLocationInfo deletedLocationInfo = deletedLocationList.get(idx); + OmKeyLocationInfo prevLocationInfo = prevLocationList.get(idx); + if (!deletedLocationInfo.hasSameBlockAs(prevLocationInfo)) { + return false; + } + } + return true; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java new file mode 100644 index 000000000000..1123c6a2df73 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; + +/** + * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + */ +public class ReclaimableDirFilter extends ReclaimableFilter { + + /** + * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. 
+ */ + public ReclaimableDirFilter(OzoneManager ozoneManager, + OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, + IOzoneManagerLock lock) { + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 1); + } + + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getVolumeName(); + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getBucketName(); + } + + @Override + protected Boolean isReclaimable(Table.KeyValue deletedDirInfo) throws IOException { + ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0); + KeyManager prevKeyManager = previousSnapshot == null ? null : previousSnapshot.get().getKeyManager(); + return isDirReclaimable(getVolumeId(), getBucketInfo(), deletedDirInfo.getValue(), getKeyManager(), prevKeyManager); + } + + private boolean isDirReclaimable(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo dirInfo, + KeyManager keyManager, KeyManager previousKeyManager) throws IOException { + if (previousKeyManager == null) { + return true; + } + OmDirectoryInfo prevDirectoryInfo = + keyManager.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, dirInfo).apply(previousKeyManager); + return prevDirectoryInfo == null || prevDirectoryInfo.getObjectID() != dirInfo.getObjectID(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java new file mode 100644 index 000000000000..f99d36f24376 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java @@ -0,0 +1,162 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.ratis.util.MemoizedCheckedSupplier; +import org.apache.ratis.util.function.CheckedSupplier; + +/** + * Filter to return deleted keys which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + */ +public class ReclaimableKeyFilter extends ReclaimableFilter { + private final Map exclusiveSizeMap; + private final Map exclusiveReplicatedSizeMap; + + /** + * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot + * in the snapshot chain corresponding to bucket key needs to be processed. + * @param keyManager : keyManager corresponding to snapshot or AOS. + * @param lock : Lock for Active OM. + */ + public ReclaimableKeyFilter(OzoneManager ozoneManager, + OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, + IOzoneManagerLock lock) { + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 2); + this.exclusiveSizeMap = new HashMap<>(); + this.exclusiveReplicatedSizeMap = new HashMap<>(); + } + + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getVolumeName(); + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getBucketName(); + } + + @Override + protected Boolean isReclaimable(Table.KeyValue deletedKeyInfo) throws IOException { + ReferenceCounted previousSnapshot = getPreviousOmSnapshot(1); + + + KeyManager previousKeyManager = Optional.ofNullable(previousSnapshot) + .map(i -> i.get().getKeyManager()).orElse(null); + + + // Getting keyInfo from prev snapshot's keyTable/fileTable + CheckedSupplier, IOException> previousKeyInfo = + MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(getVolumeId(), getBucketInfo(), + deletedKeyInfo.getValue(), getKeyManager(), previousKeyManager)); + // If file not present in previous snapshot then it won't be present in previous to previous snapshot either. 
+ if (!previousKeyInfo.get().isPresent()) { + return true; + } + + ReferenceCounted previousToPreviousSnapshot = getPreviousOmSnapshot(0); + KeyManager previousToPreviousKeyManager = Optional.ofNullable(previousToPreviousSnapshot) + .map(i -> i.get().getKeyManager()).orElse(null); + + // Getting keyInfo from prev to prev snapshot's keyTable/fileTable based on keyInfo of prev keyTable + CheckedSupplier, IOException> previousPrevKeyInfo = + MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey( + getVolumeId(), getBucketInfo(), previousKeyInfo.get().orElse(null), previousKeyManager, + previousToPreviousKeyManager)); + SnapshotInfo previousSnapshotInfo = getPreviousSnapshotInfo(1); + calculateExclusiveSize(previousSnapshotInfo, previousKeyInfo, previousPrevKeyInfo, + exclusiveSizeMap, exclusiveReplicatedSizeMap); + return false; + } + + + public Map getExclusiveSizeMap() { + return exclusiveSizeMap; + } + + public Map getExclusiveReplicatedSizeMap() { + return exclusiveReplicatedSizeMap; + } + + /** + * To calculate Exclusive Size for current snapshot, Check + * the next snapshot deletedTable if the deleted key is + * referenced in current snapshot and not referenced in the + * previous snapshot then that key is exclusive to the current + * snapshot. Here since we are only iterating through + * deletedTable we can check the previous and previous to + * previous snapshot to achieve the same. + * previousSnapshot - Snapshot for which exclusive size is + * getting calculating. + * currSnapshot - Snapshot's deletedTable is used to calculate + * previousSnapshot snapshot's exclusive size. + * previousToPrevSnapshot - Snapshot which is used to check + * if key is exclusive to previousSnapshot. + */ + private void calculateExclusiveSize(SnapshotInfo previousSnapshotInfo, + CheckedSupplier, IOException> keyInfoPrevSnapshot, + CheckedSupplier, IOException> keyInfoPrevToPrevSnapshot, + Map exclusiveSizes, Map exclusiveReplicatedSizes) + throws IOException { + if (keyInfoPrevSnapshot.get().isPresent() && !keyInfoPrevToPrevSnapshot.get().isPresent()) { + OmKeyInfo keyInfo = keyInfoPrevSnapshot.get().get(); + exclusiveSizes.compute(previousSnapshotInfo.getSnapshotId(), + (k, v) -> (v == null ? 0 : v) + keyInfo.getDataSize()); + exclusiveReplicatedSizes.compute(previousSnapshotInfo.getSnapshotId(), + (k, v) -> (v == null ? 0 : v) + keyInfo.getReplicatedSize()); + } + + } + + private Optional getPreviousSnapshotKey(long volumeId, OmBucketInfo bucketInfo, + OmKeyInfo keyInfo, KeyManager keyManager, + KeyManager previousKeyManager) throws IOException { + + if (keyInfo == null || previousKeyManager == null) { + return Optional.empty(); + } + OmKeyInfo prevKeyInfo = keyManager.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, keyInfo) + .apply(previousKeyManager); + + // Check if objectIds are matching then the keys are the same. + if (prevKeyInfo == null || prevKeyInfo.getObjectID() != keyInfo.getObjectID()) { + return Optional.empty(); + } + return isBlockLocationInfoSame(prevKeyInfo, keyInfo) ? 
Optional.of(prevKeyInfo) : Optional.empty(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java new file mode 100644 index 000000000000..93ea35465763 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; + +/** + * Filter to return rename table entries which are reclaimable based on the key presence in previous snapshot's + * keyTable/DirectoryTable in the snapshot chain. 
+ */ +public class ReclaimableRenameEntryFilter extends ReclaimableFilter { + + + public ReclaimableRenameEntryFilter(OzoneManager ozoneManager, + OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, + IOzoneManagerLock lock) { + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 1); + } + + @Override + protected Boolean isReclaimable(Table.KeyValue renameEntry) throws IOException { + ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0); + Table previousKeyTable = null; + Table prevDirTable = null; + if (previousSnapshot != null) { + previousKeyTable = previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout()); + if (getBucketInfo().getBucketLayout().isFileSystemOptimized()) { + prevDirTable = previousSnapshot.get().getMetadataManager().getDirectoryTable(); + } + } + return isRenameEntryReclaimable(renameEntry, prevDirTable, previousKeyTable); + } + + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return getKeyManager().getMetadataManager().splitRenameKey(keyValue.getKey())[0]; + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return getKeyManager().getMetadataManager().splitRenameKey(keyValue.getKey())[1]; + } + + @SafeVarargs + private final boolean isRenameEntryReclaimable(Table.KeyValue renameEntry, + Table... previousTables) + throws IOException { + for (Table previousTable : previousTables) { + if (previousTable != null) { + String prevDbKey = renameEntry.getValue(); + WithObjectID withObjectID = previousTable.getIfExist(prevDbKey); + if (withObjectID != null) { + return false; + } + } + } + return true; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index b3592726d01a..6d9084c37a0d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -133,7 +133,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; -import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.snapshot.SnapshotTestUtils.StubbedPersistentMap; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage; @@ -819,11 +818,10 @@ public void testGenerateDiffReport() throws IOException { String bucketName = "buck"; String fromSnapName = "fs"; String toSnapName = "ts"; - try (MockedStatic - mockedSnapshotDeletingService = mockStatic( - SnapshotDeletingService.class)) { - mockedSnapshotDeletingService.when(() -> - SnapshotDeletingService.isBlockLocationInfoSame(any(OmKeyInfo.class), + try (MockedStatic + mockedSnapshotUtils = mockStatic(SnapshotUtils.class)) { + mockedSnapshotUtils.when(() -> + SnapshotUtils.isBlockLocationInfoSame(any(OmKeyInfo.class), any(OmKeyInfo.class))) .thenAnswer(i -> { int keyVal = Integer.parseInt(((OmKeyInfo)i.getArgument(0)) diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java new file mode 100644 index 000000000000..0a65b5919b9d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java @@ -0,0 +1,320 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; +import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.TestInstance; +import 
org.junit.jupiter.api.io.TempDir; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.DBOptions; +import org.rocksdb.ReadOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; + +/** + * Test class for ReclaimableFilter. + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestAbstractReclaimableFilter { + + private ReclaimableFilter reclaimableFilter; + private OzoneManager ozoneManager; + private OmSnapshotManager omSnapshotManager; + private AtomicReference> lockIds = new AtomicReference<>(Collections.emptyList()); + private List volumes; + private List buckets; + private MockedStatic mockedSnapshotUtils; + private Map> snapshotInfos; + @TempDir + private Path testDir; + private SnapshotChainManager snapshotChainManager; + private KeyManager keyManager; + + protected abstract ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, + IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain); + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume) throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info, BucketLayout.FILE_SYSTEM_OPTIMIZED); + } + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, BucketLayout bucketLayout) + throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info, bucketLayout); + } + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, Function snapshotProps, + BucketLayout bucketLayout) throws IOException, RocksDBException { + this.ozoneManager = mock(OzoneManager.class); + this.snapshotChainManager = mock(SnapshotChainManager.class); + this.keyManager = mock(KeyManager.class); + IOzoneManagerLock ozoneManagerLock = mock(IOzoneManagerLock.class); + when(ozoneManagerLock.acquireReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + lockIds.set( + (List) i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + }); + when(ozoneManagerLock.releaseReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + Assertions.assertEquals(lockIds.get(), + i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + lockIds.set(Collections.emptyList()); + return OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + }); + snapshotInfos = mockSnapshotChain(actualTotalNumberOfSnapshotsInChain, + ozoneManager, snapshotChainManager, numberOfVolumes, numberOfBucketsPerVolume, snapshotProps); + mockOzoneManager(bucketLayout); + 
mockOmSnapshotManager(ozoneManager); + SnapshotInfo info = index >= actualTotalNumberOfSnapshotsInChain ? null : + snapshotInfos.get(getKey(volumes.get(volumes.size() - 1), buckets.get(buckets.size() - 1))).get(index); + this.reclaimableFilter = Mockito.spy(initializeFilter(ozoneManager, omSnapshotManager, snapshotChainManager, + info, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain)); + return info; + } + + @AfterEach + protected void teardown() throws IOException { + this.mockedSnapshotUtils.close(); + this.reclaimableFilter.close(); + } + + private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); + long volumeCount = 0; + long bucketCount = 0; + for (String volume : volumes) { + when(metadataManager.getVolumeId(eq(volume))).thenReturn(volumeCount); + for (String bucket : buckets) { + when(ozoneManager.getBucketInfo(eq(volume), eq(bucket))) + .thenReturn(OmBucketInfo.newBuilder().setVolumeName(volume).setBucketName(bucket) + .setObjectID(bucketCount).setBucketLayout(bucketLayout).build()); + bucketCount++; + } + volumeCount++; + } + } + + private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { + try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); + MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, + (mock, context) -> { + Map> map = new HashMap<>(); + when(mock.get(any(UUID.class))).thenAnswer(i -> { + if (snapshotInfos.values().stream().flatMap(List::stream) + .map(SnapshotInfo::getSnapshotId) + .noneMatch(id -> id.equals(i.getArgument(0, UUID.class)))) { + throw new IOException("Snapshot " + i.getArgument(0, UUID.class) + " not found"); + } + return map.computeIfAbsent(i.getArgument(0, UUID.class), (k) -> { + ReferenceCounted ref = mock(ReferenceCounted.class); + OmSnapshot omSnapshot = mock(OmSnapshot.class); + when(omSnapshot.getSnapshotID()).thenReturn(k); + when(ref.get()).thenReturn(omSnapshot); + return ref; + }); + }); + })) { + ManagedRocksDB managedRocksDB = mock(ManagedRocksDB.class); + RocksDB rocksDB = mock(RocksDB.class); + rocksdb.when(() -> ManagedRocksDB.open(any(DBOptions.class), anyString(), anyList(), anyList())) + .thenReturn(managedRocksDB); + RocksIterator emptyRocksIterator = mock(RocksIterator.class); + when(emptyRocksIterator.isValid()).thenReturn(false); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class))).thenReturn(emptyRocksIterator); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class))).thenReturn(emptyRocksIterator); + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + DBStore dbStore = mock(RDBStore.class); + when(metadataManager.getStore()).thenReturn(dbStore); + when(dbStore.getRocksDBCheckpointDiffer()).thenReturn(Mockito.mock(RocksDBCheckpointDiffer.class)); + Table mockedTransactionTable = Mockito.mock(Table.class); + when(metadataManager.getTransactionInfoTable()).thenReturn(mockedTransactionTable); + when(mockedTransactionTable.getSkipCache(eq(TRANSACTION_INFO_KEY))) + .thenReturn(TransactionInfo.valueOf(0, 10)); + when(managedRocksDB.get()).thenReturn(rocksDB); + + when(rocksDB.createColumnFamily(any(ColumnFamilyDescriptor.class))) + .thenAnswer(i -> { + ColumnFamilyDescriptor descriptor = i.getArgument(0, ColumnFamilyDescriptor.class); + ColumnFamilyHandle ch = Mockito.mock(ColumnFamilyHandle.class); + 
when(ch.getName()).thenReturn(descriptor.getName()); + return ch; + }); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); + when(om.getConfiguration()).thenReturn(conf); + when(om.isFilesystemSnapshotEnabled()).thenReturn(true); + this.omSnapshotManager = new OmSnapshotManager(om); + } + } + + protected List getLastSnapshotInfos(String volume, String bucket, int numberOfSnapshotsInChain, + int index) { + List infos = getSnapshotInfos().get(getKey(volume, bucket)); + int endIndex = Math.min(index - 1, infos.size() - 1); + return IntStream.range(endIndex - numberOfSnapshotsInChain + 1, endIndex + 1).mapToObj(i -> i >= 0 ? + infos.get(i) : null).collect(Collectors.toList()); + } + + private Map> mockSnapshotChain( + int numberOfSnaphotsInChain, OzoneManager om, SnapshotChainManager chainManager, int numberOfVolumes, + int numberOfBuckets, Function snapshotInfoProp) { + volumes = IntStream.range(0, numberOfVolumes).mapToObj(i -> "volume" + i).collect(Collectors.toList()); + buckets = IntStream.range(0, numberOfBuckets).mapToObj(i -> "bucket" + i).collect(Collectors.toList()); + Map> bucketSnapshotMap = new HashMap<>(); + for (String volume : volumes) { + for (String bucket : buckets) { + bucketSnapshotMap.computeIfAbsent(getKey(volume, bucket), (k) -> new ArrayList<>()); + } + } + mockedSnapshotUtils = mockStatic(SnapshotUtils.class, CALLS_REAL_METHODS); + for (int i = 0; i < numberOfSnaphotsInChain; i++) { + for (String volume : volumes) { + for (String bucket : buckets) { + SnapshotInfo snapshotInfo = snapshotInfoProp.apply(SnapshotInfo.newInstance(volume, bucket, + "snap" + i, UUID.randomUUID(), 0)); + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), + eq(snapshotInfo.getTableKey()))).thenReturn(snapshotInfo); + mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(om), eq(chainManager), + eq(snapshotInfo))).thenReturn(infos.isEmpty() ? null : infos.get(infos.size() - 1)); + infos.add(snapshotInfo); + } + } + } + + for (String volume : volumes) { + for (String bucket : buckets) { + mockedSnapshotUtils.when(() -> SnapshotUtils.getLatestSnapshotInfo( + eq(volume), eq(bucket), eq(om), eq(chainManager))) + .thenAnswer(i -> { + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + return infos.isEmpty() ? 
null : infos.get(infos.size() - 1); + }); + } + } + return bucketSnapshotMap; + } + + public static String getKey(String volume, String bucket) { + return volume + "/" + bucket; + } + + public Map> getSnapshotInfos() { + return snapshotInfos; + } + + public SnapshotChainManager getSnapshotChainManager() { + return snapshotChainManager; + } + + public ReclaimableFilter getReclaimableFilter() { + return reclaimableFilter; + } + + public AtomicReference> getLockIds() { + return lockIds; + } + + public List getBuckets() { + return buckets; + } + + public List getVolumes() { + return volumes; + } + + public OzoneManager getOzoneManager() { + return ozoneManager; + } + + public MockedStatic getMockedSnapshotUtils() { + return mockedSnapshotUtils; + } + + public OmSnapshotManager getOmSnapshotManager() { + return omSnapshotManager; + } + + public KeyManager getKeyManager() { + return keyManager; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java new file mode 100644 index 000000000000..590d96e165db --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableDirFilter. 
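+ * Exercises the filter against mocked snapshot chains of varying lengths: a deleted directory is expected
+ * to be reclaimable only when the previous snapshot in the chain has no directory with the same object ID.
+ * Illustrative usage, mirroring how the tests below drive the filter (not a production code path; variable
+ * names are placeholders):
+ * <pre>{@code
+ *   ReclaimableFilter filter = new ReclaimableDirFilter(om, omSnapshotManager, snapshotChainManager,
+ *       currentSnapshotInfo, keyManager, lock);
+ *   boolean reclaimable = filter.apply(Table.newKeyValue(deletedDirKey, deletedDirInfo));
+ * }</pre>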
+ */ +public class TestReclaimableDirFilter extends TestAbstractReclaimableFilter { + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo, + KeyManager km, IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableDirFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock); + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + arguments.add(Arguments.of(i, j)); + } + } + return arguments; + } + + private void testReclaimableDirFilter(String volume, String bucket, int index, + OmKeyInfo dirInfo, OmDirectoryInfo prevDirInfo, + Boolean expectedValue) + throws IOException { + List snapshotInfos = getLastSnapshotInfos(volume, bucket, 1, index); + SnapshotInfo prevSnapshotInfo = snapshotInfos.get(0); + OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); + long volumeId = getOzoneManager().getMetadataManager().getVolumeId(volume); + KeyManager keyManager = getKeyManager(); + if (prevSnapshotInfo != null) { + ReferenceCounted prevSnap = Optional.ofNullable(prevSnapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + mockOmSnapshot(prevSnap); + when(keyManager.getPreviousSnapshotOzoneDirInfo(eq(volumeId), eq(bucketInfo), eq(dirInfo))) + .thenReturn((km) -> prevDirInfo); + } + + when(dirInfo.getVolumeName()).thenReturn(volume); + when(dirInfo.getBucketName()).thenReturn(bucket); + assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue("key", dirInfo))); + } + + private OmKeyInfo getMockedOmKeyInfo(long objectId) { + OmKeyInfo keyInfo = mock(OmKeyInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + return keyInfo; + } + + private OmDirectoryInfo getMockedOmDirInfo(long objectId) { + OmDirectoryInfo keyInfo = mock(OmDirectoryInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + return keyInfo; + } + + private KeyManager mockOmSnapshot(ReferenceCounted snapshot) { + if (snapshot != null) { + OmSnapshot omSnapshot = snapshot.get(); + KeyManager keyManager = mock(KeyManager.class); + when(omSnapshot.getKeyManager()).thenReturn(keyManager); + return keyManager; + } + return null; + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableDirectory(int actualNumberOfSnapshots, int index) throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo dirInfo = getMockedOmKeyInfo(1); + OmDirectoryInfo prevDirectoryInfo = index - 1 >= 0 ? 
getMockedOmDirInfo(1) : null; + testReclaimableDirFilter(volume, bucket, index, dirInfo, prevDirectoryInfo, prevDirectoryInfo == null); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentObjId(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo dirInfo = getMockedOmKeyInfo(1); + OmDirectoryInfo prevDirectoryInfo = index - 1 >= 0 ? getMockedOmDirInfo(2) : null; + testReclaimableDirFilter(volume, bucket, index, dirInfo, prevDirectoryInfo, true); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java new file mode 100644 index 000000000000..88cdbeca5a54 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableKeyFilter. 
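+ * Covers reclaimability of deleted keys against the previous snapshots in the chain: a key is expected to
+ * remain non-reclaimable only when the previous snapshot holds the same object ID with matching block
+ * locations, and the filter's exclusive-size accounting for such keys is verified as well.
+ * Illustrative usage, mirroring the test setup below (not a production code path; variable names are
+ * placeholders):
+ * <pre>{@code
+ *   ReclaimableKeyFilter filter = new ReclaimableKeyFilter(om, omSnapshotManager, snapshotChainManager,
+ *       currentSnapshotInfo, keyManager, lock);
+ *   boolean reclaimable = filter.apply(Table.newKeyValue(deletedKey, deletedKeyInfo));
+ *   filter.getExclusiveSizeMap();  // exclusive sizes accumulated per previous snapshot
+ * }</pre>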
+ */
+public class TestReclaimableKeyFilter extends TestAbstractReclaimableFilter {
+  @Override
+  protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager,
+      SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo,
+      KeyManager km, IOzoneManagerLock lock,
+      int numberOfPreviousSnapshotsFromChain) {
+    return new ReclaimableKeyFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock);
+  }
+
+  List<Arguments> testReclaimableFilterArguments() {
+    List<Arguments> arguments = new ArrayList<>();
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 5; j++) {
+        arguments.add(Arguments.of(i, j));
+      }
+    }
+    return arguments;
+  }
+
+  private KeyManager mockOmSnapshot(ReferenceCounted<OmSnapshot> snapshot) {
+    if (snapshot != null) {
+      OmSnapshot omSnapshot = snapshot.get();
+      KeyManager keyManager = mock(KeyManager.class);
+      when(omSnapshot.getKeyManager()).thenReturn(keyManager);
+      return keyManager;
+    }
+    return null;
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void testReclaimableKeyFilter(String volume, String bucket, int index,
+      OmKeyInfo keyInfo, OmKeyInfo prevKeyInfo, OmKeyInfo prevPrevKeyInfo,
+      Boolean expectedValue,
+      Optional<AtomicLong> size, Optional<AtomicLong> replicatedSize)
+      throws IOException {
+    List<SnapshotInfo> snapshotInfos = getLastSnapshotInfos(volume, bucket, 2, index);
+    SnapshotInfo previousToPreviousSnapshotInfo = snapshotInfos.get(0);
+    SnapshotInfo prevSnapshotInfo = snapshotInfos.get(1);
+    OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket);
+    long volumeId = getOzoneManager().getMetadataManager().getVolumeId(volume);
+
+    ReferenceCounted<OmSnapshot> prevSnap = Optional.ofNullable(prevSnapshotInfo)
+        .map(info -> {
+          try {
+            return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName());
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }).orElse(null);
+    ReferenceCounted<OmSnapshot> prevToPrevSnap = Optional.ofNullable(previousToPreviousSnapshotInfo)
+        .map(info -> {
+          try {
+            return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName());
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }).orElse(null);
+
+    KeyManager keyManager = getKeyManager();
+    KeyManager prevKeyManager = mockOmSnapshot(prevSnap);
+    KeyManager prevToPrevKeyManager = mockOmSnapshot(prevToPrevSnap);
+    if (prevKeyManager != null) {
+      when(keyManager.getPreviousSnapshotOzoneKeyInfo(eq(volumeId),
+          eq(bucketInfo), eq(keyInfo)))
+          .thenReturn((km) -> prevKeyInfo);
+    }
+    if (prevKeyInfo != null && prevKeyManager != null && prevToPrevKeyManager != null) {
+      when(prevKeyManager.getPreviousSnapshotOzoneKeyInfo(eq(volumeId),
+          eq(bucketInfo), eq(prevKeyInfo))).thenReturn((km) -> prevPrevKeyInfo);
+    }
+    when(keyInfo.getVolumeName()).thenReturn(volume);
+    when(keyInfo.getBucketName()).thenReturn(bucket);
+    assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue("key", keyInfo)));
+    ReclaimableKeyFilter keyFilter = (ReclaimableKeyFilter) getReclaimableFilter();
+    if (prevSnap != null) {
+      assertEquals(size.map(AtomicLong::get).orElse(null),
+          keyFilter.getExclusiveSizeMap().get(prevSnap.get().getSnapshotID()));
+      assertEquals(replicatedSize.map(AtomicLong::get).orElse(null),
+          keyFilter.getExclusiveReplicatedSizeMap().get(prevSnap.get().getSnapshotID()));
+    } else {
+      assertTrue(keyFilter.getExclusiveReplicatedSizeMap().isEmpty());
+      assertTrue(keyFilter.getExclusiveSizeMap().isEmpty());
+    }
+
+  }
+
+  private OmKeyInfo getMockedOmKeyInfo(long objectId, long size, long replicatedSize) {
+    OmKeyInfo keyInfo =
mock(OmKeyInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + when(keyInfo.getDataSize()).thenReturn(size); + when(keyInfo.getReplicatedSize()).thenReturn(replicatedSize); + return keyInfo; + } + + private OmKeyInfo getMockedOmKeyInfo(long objectId) { + return getMockedOmKeyInfo(objectId, 0, 0); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableKey(int actualNumberOfSnapshots, int index) throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(1) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? getMockedOmKeyInfo(3) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(true); + } + if (prevPrevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo), eq(prevKeyInfo))) + .thenReturn(true); + } + Optional size = Optional.ofNullable(prevKeyInfo).map(i -> new AtomicLong()); + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + prevKeyInfo == null, size, size); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentObjId(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(2) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? getMockedOmKeyInfo(3) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(true); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + true, Optional.empty(), Optional.empty()); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentBlockIds(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(1) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? 
getMockedOmKeyInfo(3) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(false); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + true, Optional.empty(), Optional.empty()); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testExclusiveSizeCalculationWithNonReclaimableKey(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1, 1, 4); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(1, 2, 5) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? getMockedOmKeyInfo(1, 3, 6) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(true); + } + if (prevPrevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo), eq(prevKeyInfo))) + .thenReturn(true); + } + + Optional size = Optional.ofNullable(prevKeyInfo) + .map(i -> prevPrevKeyInfo == null ? new AtomicLong(2) : null); + Optional replicatedSize = Optional.ofNullable(prevKeyInfo) + .map(i -> prevPrevKeyInfo == null ? new AtomicLong(5) : null); + + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + prevKeyInfo == null, size, replicatedSize); + if (prevPrevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo), eq(prevKeyInfo))) + .thenReturn(false); + } + if (prevKeyInfo != null) { + size = Optional.of(size.orElse(new AtomicLong())); + replicatedSize = Optional.of(replicatedSize.orElse(new AtomicLong())); + size.get().addAndGet(2L); + replicatedSize.get().addAndGet(5L); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + prevKeyInfo == null, size, replicatedSize); + OmKeyInfo prevPrevKeyInfo1; + if (prevPrevKeyInfo != null) { + prevPrevKeyInfo1 = getMockedOmKeyInfo(2, 3, 4); + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo1), eq(prevKeyInfo))) + .thenReturn(true); + } else { + prevPrevKeyInfo1 = null; + } + + if (prevKeyInfo != null) { + size = Optional.of(size.orElse(new AtomicLong())); + replicatedSize = Optional.of(replicatedSize.orElse(new AtomicLong())); + size.get().addAndGet(2L); + replicatedSize.get().addAndGet(5L); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo1, + prevKeyInfo == null, size, replicatedSize); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java new file mode 100644 index 000000000000..50ba1acc4e48 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.snapshot.filter;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.common.collect.ImmutableMap;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.UUID;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.KeyManager;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.rocksdb.RocksDBException;
+
+/**
+ * Test class for ReclaimableRenameEntryFilter.
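+ * Checks that a rename-table entry is reclaimable only when the renamed object no longer exists in the
+ * previous snapshot's key table (non-FSO layouts) or in its key and directory tables (FILE_SYSTEM_OPTIMIZED).
+ * Illustrative usage, mirroring the test setup below (not a production code path; variable names are
+ * placeholders):
+ * <pre>{@code
+ *   ReclaimableFilter filter = new ReclaimableRenameEntryFilter(om, omSnapshotManager, snapshotChainManager,
+ *       currentSnapshotInfo, keyManager, lock);
+ *   boolean reclaimable = filter.apply(Table.newKeyValue(renameKey, renamedObjectKey));
+ * }</pre>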
+ */ +public class TestReclaimableRenameEntryFilter extends TestAbstractReclaimableFilter { + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo, + KeyManager km, IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableRenameEntryFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock); + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + arguments.add(Arguments.of(i, j)); + } + } + return arguments; + } + + private void testReclaimableRenameEntryFilter(String volume, String bucket, int index, + String value, + Table keyTable, + Table dirTable, + Boolean expectedValue) + throws IOException { + List snapshotInfos = getLastSnapshotInfos(volume, bucket, 1, index); + SnapshotInfo prevSnapshotInfo = snapshotInfos.get(0); + OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); + if (prevSnapshotInfo != null) { + ReferenceCounted prevSnap = Optional.ofNullable(prevSnapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + mockOmSnapshot(prevSnap, bucketInfo, keyTable, dirTable); + } + String key = bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName() + "/" + 1; + String[] keySplit = key.split("/"); + KeyManager km = getKeyManager(); + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(km.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.splitRenameKey(eq(key))).thenReturn(keySplit); + assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue(key, value))); + } + + private Table getMockedTable(Map map) throws IOException { + Table table = mock(Table.class); + when(table.get(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + when(table.getIfExist(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + return table; + } + + private Table getFailingMockedTable() throws IOException { + Table table = mock(Table.class); + when(table.get(anyString())).thenThrow(new IOException()); + when(table.getIfExist(anyString())).thenThrow(new IOException()); + return table; + } + + private KeyManager mockOmSnapshot(ReferenceCounted snapshot, + OmBucketInfo bucketInfo, Table keyTable, + Table dirTable) { + if (snapshot != null) { + OmSnapshot omSnapshot = snapshot.get(); + KeyManager keyManager = mock(KeyManager.class); + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(omSnapshot.getMetadataManager()).thenReturn(omMetadataManager); + when(keyManager.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.getKeyTable(eq(bucketInfo.getBucketLayout()))).thenReturn(keyTable); + when(omMetadataManager.getDirectoryTable()).thenReturn(dirTable); + return keyManager; + } + return null; + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableRenameEntryWithKeyNonFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.OBJECT_STORE); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = 
UUID.randomUUID().toString(); + Table keyTable = getMockedTable(ImmutableMap.of(value, mock(OmKeyInfo.class))); + Table directoryTable = getFailingMockedTable(); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, index == 0); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableRenameEntryWithKeyNonFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.OBJECT_STORE); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(Collections.emptyMap()); + Table directoryTable = getFailingMockedTable(); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, true); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableRenameEntryWithFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(Collections.emptyMap()); + Table directoryTable = getMockedTable(Collections.emptyMap()); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, true); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableRenameEntryWithFileFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(ImmutableMap.of(value, mock(OmKeyInfo.class))); + Table directoryTable = getMockedTable(Collections.emptyMap()); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, index == 0); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableRenameEntryWithDirFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(Collections.emptyMap()); + Table directoryTable = getMockedTable(ImmutableMap.of(value, mock(OmDirectoryInfo.class))); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, index == 0); + } +}