From a9f3630cde83c2a51fed2e8b50c7f9dafff9e0bf Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 22 Oct 2024 11:23:12 -0700 Subject: [PATCH 01/14] HDDS-11244. Fix Checkstyle Change-Id: I34b91e11970c9cd785ba863dd22a4081244cede6 --- .../ozone/util/CheckedExceptionOperation.java | 35 +++ .../ozone/om/lock/OzoneManagerLock.java | 3 +- .../hadoop/ozone/om/OMMetadataManager.java | 5 + .../ozone/om/OmMetadataManagerImpl.java | 9 +- .../hadoop/ozone/om/lock/MultiLocks.java | 75 +++++ .../service/AbstractKeyDeletingService.java | 2 +- .../om/service/SnapshotDeletingService.java | 56 ---- .../om/snapshot/SnapshotDiffManager.java | 6 +- .../ozone/om/snapshot/SnapshotUtils.java | 58 +++- .../snapshot/filter/ReclaimableDirFilter.java | 116 ++++++++ .../om/snapshot/filter/ReclaimableFilter.java | 222 ++++++++++++++ .../snapshot/filter/ReclaimableKeyFilter.java | 277 ++++++++++++++++++ .../filter/ReclaimableRenameEntryFilter.java | 103 +++++++ .../om/snapshot/filter/package-info.java | 22 ++ .../hadoop/ozone/om/lock/TestMultiLocks.java | 118 ++++++++ .../om/snapshot/TestSnapshotDiffManager.java | 2 +- .../filter/TestReclaimableFilter.java | 34 +++ 17 files changed, 1076 insertions(+), 67 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java create mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java new file mode 100644 index 000000000000..c1e664422054 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.util; + +/** + * + * Represents a function that accepts one argument and produces a result. + * This is a functional interface whose functional method is apply(Object). + * Type parameters: + * – the type of the input to the function – the type of the result of the function + * - the type of exception thrown. 
/**
 * Represents an operation that accepts one argument, produces a result, and is
 * permitted to throw a checked exception (unlike {@link java.util.function.Function}).
 *
 * @param <T> the type of the input to the operation.
 * @param <R> the type of the result of the operation.
 * @param <E> the type of exception thrown by the operation.
 */
@FunctionalInterface
public interface CheckedExceptionOperation<T, R, E extends Exception> {

  /**
   * Applies this operation to the given argument.
   *
   * @param t the operation argument.
   * @return the operation result.
   * @throws E if the operation fails.
   */
  R apply(T t) throws E;

  /**
   * Returns a composed operation that first applies this operation to its
   * input and then applies {@code operation} to the result.
   *
   * @param <V> result type of the composed operation.
   */
  default <V> CheckedExceptionOperation<T, V, E> andThen(
      CheckedExceptionOperation<R, V, E> operation) {
    return (T t) -> operation.apply(this.apply(t));
  }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 4873a7db4916..7911c333aa13 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -114,8 +114,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR; -import static org.apache.hadoop.ozone.om.service.SnapshotDeletingService.isBlockLocationInfoSame; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotDirExist; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import org.apache.hadoop.util.Time; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -2143,6 +2143,13 @@ public String getRenameKey(String volumeName, String bucketName, renameKey.append(OM_KEY_PREFIX).append(objectID); return renameKey.toString(); } + + @Override + public String[] splitRenameKey(String renameKey) { + String[] splitVals = renameKey.split(OM_KEY_PREFIX); + return new String[]{splitVals[1], splitVals[2], splitVals[3]}; + } + @Override public String getMultipartKey(long volumeId, long bucketId, long parentID, String fileName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java new file mode 100644 index 000000000000..d7fe35b6f0d3 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.om.lock; + +import org.apache.hadoop.ozone.om.exceptions.OMException; + +import java.util.Collection; +import java.util.LinkedList; +import java.util.Queue; + +/** + * Class to take multiple locks on a resource. + */ +public class MultiLocks { + private final Queue objectLocks; + private final IOzoneManagerLock lock; + private final OzoneManagerLock.Resource resource; + private final boolean writeLock; + + public MultiLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource resource, boolean writeLock) { + this.writeLock = writeLock; + this.resource = resource; + this.lock = lock; + this.objectLocks = new LinkedList<>(); + } + + public OMLockDetails acquireLock(Collection objects) throws OMException { + if (!objectLocks.isEmpty()) { + throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : " + + objectLocks, OMException.ResultCodes.INTERNAL_ERROR); + } + OMLockDetails omLockDetails = OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + for (T object : objects) { + if (object != null) { + omLockDetails = this.writeLock ? 
lock.acquireWriteLock(resource, object.toString()) + : lock.acquireReadLock(resource, object.toString()); + objectLocks.add(object); + if (!omLockDetails.isLockAcquired()) { + break; + } + } + } + if (!omLockDetails.isLockAcquired()) { + releaseLock(); + } + return omLockDetails; + } + + public void releaseLock() { + while (!objectLocks.isEmpty()) { + T object = objectLocks.poll(); + if (this.writeLock) { + lock.releaseWriteLock(resource, object.toString()); + } else { + lock.releaseReadLock(resource, object.toString()); + } + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 7559cf9a7291..561a7e91fc55 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -61,7 +61,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.service.SnapshotDeletingService.isBlockLocationInfoSame; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; /** * Abstracts common code from KeyDeletingService and DirectoryDeletingService diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index edc6c7a16296..22f807ebbdba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -37,8 +37,6 @@ import 
org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; @@ -317,60 +315,6 @@ boolean shouldIgnoreSnapshot(SnapshotInfo snapInfo) throws IOException { !OmSnapshotManager.areSnapshotChangesFlushedToDB(getOzoneManager().getMetadataManager(), snapInfo); } - // TODO: Move this util class. - public static boolean isBlockLocationInfoSame(OmKeyInfo prevKeyInfo, - OmKeyInfo deletedKeyInfo) { - - if (prevKeyInfo == null && deletedKeyInfo == null) { - LOG.debug("Both prevKeyInfo and deletedKeyInfo are null."); - return true; - } - if (prevKeyInfo == null || deletedKeyInfo == null) { - LOG.debug("prevKeyInfo: '{}' or deletedKeyInfo: '{}' is null.", - prevKeyInfo, deletedKeyInfo); - return false; - } - // For hsync, Though the blockLocationInfo of a key may not be same - // at the time of snapshot and key deletion as blocks can be appended. - // If the objectId is same then the key is same. 
- if (prevKeyInfo.isHsync() && deletedKeyInfo.isHsync()) { - return true; - } - - if (prevKeyInfo.getKeyLocationVersions().size() != - deletedKeyInfo.getKeyLocationVersions().size()) { - return false; - } - - OmKeyLocationInfoGroup deletedOmKeyLocation = - deletedKeyInfo.getLatestVersionLocations(); - OmKeyLocationInfoGroup prevOmKeyLocation = - prevKeyInfo.getLatestVersionLocations(); - - if (deletedOmKeyLocation == null || prevOmKeyLocation == null) { - return false; - } - - List deletedLocationList = - deletedOmKeyLocation.getLocationList(); - List prevLocationList = - prevOmKeyLocation.getLocationList(); - - if (deletedLocationList.size() != prevLocationList.size()) { - return false; - } - - for (int idx = 0; idx < deletedLocationList.size(); idx++) { - OmKeyLocationInfo deletedLocationInfo = deletedLocationList.get(idx); - OmKeyLocationInfo prevLocationInfo = prevLocationList.get(idx); - if (!deletedLocationInfo.hasSameBlockAs(prevLocationInfo)) { - return false; - } - } - - return true; - } - @Override public BackgroundTaskQueue getTasks() { BackgroundTaskQueue queue = new BackgroundTaskQueue(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 6393f12066c4..4f3b550edc0f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; -import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import 
org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; @@ -1417,8 +1416,7 @@ long generateDiffReport( private boolean isKeyModified(OmKeyInfo fromKey, OmKeyInfo toKey) { return !fromKey.isKeyInfoSame(toKey, false, false, false, false, true) - || !SnapshotDeletingService.isBlockLocationInfoSame( - fromKey, toKey); + || !SnapshotUtils.isBlockLocationInfoSame(fromKey, toKey); } private boolean isObjectModified(String fromObjectName, String toObjectName, @@ -1466,7 +1464,7 @@ private boolean isBlockLocationSame( "OmKeyInfo"); } - return SnapshotDeletingService.isBlockLocationInfoSame( + return SnapshotUtils.isBlockLocationInfoSame( (OmKeyInfo) fromObject, (OmKeyInfo) toObject); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 201a9fe0c9c9..77738bcef6a2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -25,6 +25,8 @@ import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; @@ -58,8 +60,7 @@ * Util class for snapshot diff APIs. 
*/ public final class SnapshotUtils { - private static final Logger LOG = - LoggerFactory.getLogger(SnapshotUtils.class); + private static final Logger LOG = LoggerFactory.getLogger(SnapshotUtils.class); private SnapshotUtils() { throw new IllegalStateException("SnapshotUtils should not be initialized."); @@ -194,7 +195,7 @@ public static SnapshotInfo getPreviousSnapshot(OzoneManager ozoneManager, /** * Get the previous snapshot in the snapshot chain. */ - private static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) + public static UUID getPreviousSnapshotId(SnapshotInfo snapInfo, SnapshotChainManager chainManager) throws IOException { // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot @@ -349,4 +350,55 @@ public static void validatePreviousSnapshotId(SnapshotInfo snapshotInfo, OMException.ResultCodes.INVALID_REQUEST); } } + + public static boolean isBlockLocationInfoSame(OmKeyInfo prevKeyInfo, + OmKeyInfo deletedKeyInfo) { + if (prevKeyInfo == null && deletedKeyInfo == null) { + LOG.debug("Both prevKeyInfo and deletedKeyInfo are null."); + return true; + } + if (prevKeyInfo == null || deletedKeyInfo == null) { + LOG.debug("prevKeyInfo: '{}' or deletedKeyInfo: '{}' is null.", + prevKeyInfo, deletedKeyInfo); + return false; + } + // For hsync, Though the blockLocationInfo of a key may not be same + // at the time of snapshot and key deletion as blocks can be appended. + // If the objectId is same then the key is same. 
+ if (prevKeyInfo.isHsync() && deletedKeyInfo.isHsync()) { + return true; + } + + if (prevKeyInfo.getKeyLocationVersions().size() != + deletedKeyInfo.getKeyLocationVersions().size()) { + return false; + } + + OmKeyLocationInfoGroup deletedOmKeyLocation = + deletedKeyInfo.getLatestVersionLocations(); + OmKeyLocationInfoGroup prevOmKeyLocation = + prevKeyInfo.getLatestVersionLocations(); + + if (deletedOmKeyLocation == null || prevOmKeyLocation == null) { + return false; + } + + List deletedLocationList = + deletedOmKeyLocation.getLocationList(); + List prevLocationList = + prevOmKeyLocation.getLocationList(); + + if (deletedLocationList.size() != prevLocationList.size()) { + return false; + } + + for (int idx = 0; idx < deletedLocationList.size(); idx++) { + OmKeyLocationInfo deletedLocationInfo = deletedLocationList.get(idx); + OmKeyLocationInfo prevLocationInfo = prevLocationList.get(idx); + if (!deletedLocationInfo.hasSameBlockAs(prevLocationInfo)) { + return false; + } + } + return true; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java new file mode 100644 index 000000000000..3410d73c8148 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.ozone.om.snapshot.filter; + +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; + +import java.io.IOException; + +/** + * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + */ +public class ReclaimableDirFilter extends ReclaimableFilter { + + private final OzoneManager ozoneManager; + + /** + * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + * + * @param omSnapshotManager + * @param snapshotChainManager + * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot + * in the snapshot chain corresponding to bucket key needs to be processed. + * @param metadataManager : MetadataManager corresponding to snapshot or AOS. + * @param lock : Lock for Active OM. 
+ */ + public ReclaimableDirFilter(OzoneManager ozoneManager, + OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, + IOzoneManagerLock lock) { + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 1); + this.ozoneManager = ozoneManager; + } + + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getVolumeName(); + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getBucketName(); + } + + @Override + protected Boolean isReclaimable(Table.KeyValue deletedDirInfo) throws IOException { + ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0); + Table prevDirTable = previousSnapshot == null ? null : + previousSnapshot.get().getMetadataManager().getDirectoryTable(); + return isDirReclaimable(deletedDirInfo, prevDirTable, + getMetadataManager().getSnapshotRenamedTable()); + } + + private boolean isDirReclaimable(Table.KeyValue deletedDir, + Table previousDirTable, + Table renamedTable) throws IOException { + if (previousDirTable == null) { + return true; + } + + String deletedDirDbKey = deletedDir.getKey(); + OmKeyInfo deletedDirInfo = deletedDir.getValue(); + String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( + deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), + deletedDirInfo.getObjectID()); + + /* + snapshotRenamedTable: /volumeName/bucketName/objectID -> + /volumeId/bucketId/parentId/dirName + */ + String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); + String prevDbKey = null; + + if (dbKeyBeforeRename != null) { + prevDbKey = dbKeyBeforeRename; + } else { + // In OMKeyDeleteResponseWithFSO OzonePathKey is converted to + // OzoneDeletePathKey. Changing it back to check the previous DirTable. 
+ prevDbKey = ozoneManager.getMetadataManager() + .getOzoneDeletePathDirKey(deletedDirDbKey); + } + + OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); + if (prevDirectoryInfo == null) { + return true; + } + return prevDirectoryInfo.getObjectID() != deletedDirInfo.getObjectID(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java new file mode 100644 index 000000000000..200a35c47724 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package org.apache.hadoop.ozone.om.snapshot.filter; + +import com.google.common.collect.Lists; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.MultiLocks; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.util.CheckedExceptionOperation; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +/** + * This class is responsible for opening last N snapshot given snapshot or AOS metadata manager by acquiring a lock. + */ +public abstract class ReclaimableFilter implements CheckedExceptionOperation, + Boolean, IOException>, Closeable { + + private final OzoneManager ozoneManager; + private final SnapshotInfo currentSnapshotInfo; + private final OmSnapshotManager omSnapshotManager; + private final SnapshotChainManager snapshotChainManager; + + private final List previousSnapshotInfos; + private final List> previousOmSnapshots; + private final MultiLocks snapshotIdLocks; + private Long volumeId; + private OmBucketInfo bucketInfo; + private final OMMetadataManager metadataManager; + private final int numberOfPreviousSnapshotsFromChain; + + /** + * Filter to return deleted keys/directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. 
+ * + * @param omSnapshotManager + * @param snapshotChainManager + * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot + * in the snapshot chain corresponding to bucket key needs to be processed. + * @param metadataManager : MetadataManager corresponding to snapshot or AOS. + * @param lock : Lock for Active OM. + */ + public ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, + SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, + IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + this.ozoneManager = ozoneManager; + this.omSnapshotManager = omSnapshotManager; + this.currentSnapshotInfo = currentSnapshotInfo; + this.snapshotChainManager = snapshotChainManager; + this.snapshotIdLocks = new MultiLocks<>(lock, OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK, false); + this.metadataManager = metadataManager; + this.numberOfPreviousSnapshotsFromChain = numberOfPreviousSnapshotsFromChain; + this.previousOmSnapshots = new ArrayList<>(numberOfPreviousSnapshotsFromChain); + this.previousSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); + } + + private List getLastNSnapshotInChain(String volume, String bucket) throws IOException { + if (currentSnapshotInfo != null && + (!currentSnapshotInfo.getVolumeName().equals(volume) || !currentSnapshotInfo.getBucketName().equals(bucket))) { + throw new IOException("Volume & Bucket name for snapshot : " + currentSnapshotInfo + " not matching for " + + "key in volume: " + volume + " bucket: " + bucket); + } + SnapshotInfo expectedPreviousSnapshotInfo = currentSnapshotInfo == null + ? 
SnapshotUtils.getLatestSnapshotInfo(volume, bucket, ozoneManager, snapshotChainManager) + : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, currentSnapshotInfo); + List snapshotInfos = Lists.newArrayList(expectedPreviousSnapshotInfo); + SnapshotInfo snapshotInfo = expectedPreviousSnapshotInfo; + while (snapshotInfos.size() < numberOfPreviousSnapshotsFromChain) { + snapshotInfo = snapshotInfo == null ? null + : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); + snapshotInfos.add(snapshotInfo); + // If changes made to the snapshot have not been flushed to disk, throw exception immediately, next run of + // garbage collection would process the snapshot. + if (!OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), snapshotInfo)) { + throw new IOException("Changes made to the snapshot " + snapshotInfo + " have not been flushed to the disk "); + } + } + + // Reversing list to get the correct order in chain. To ensure locking order is as per the chain ordering. + Collections.reverse(snapshotInfos); + return snapshotInfos; + } + + private boolean validateExistingLastNSnapshotsInChain(String volume, String bucket) throws IOException { + List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); + List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() + .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) + .collect(Collectors.toList()); + List existingSnapshotIds = previousOmSnapshots.stream() + .map(omSnapshotReferenceCounted -> omSnapshotReferenceCounted == null ? null : + omSnapshotReferenceCounted.get().getSnapshotID()).collect(Collectors.toList()); + return expectedSnapshotIds.equals(existingSnapshotIds); + } + + // Initialize the last N snapshots in the chain by acquiring locks. Throw IOException if it fails. 
+ private void initializePreviousSnapshotsFromChain(String volume, String bucket) throws IOException { + // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. + if (!validateExistingLastNSnapshotsInChain(volume, bucket)) { + close(); + try { + // Acquire lock only on last N-1 snapshot & current snapshot(AOS if it is null). + List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); + List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() + .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) + .collect(Collectors.toList()); + List lockIds = new ArrayList<>(expectedSnapshotIds.subList(1, expectedSnapshotIds.size())); + lockIds.add(currentSnapshotInfo == null ? null : currentSnapshotInfo.getSnapshotId()); + + if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { + for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { + if (snapshotInfo != null) { + // For AOS fail operation if any of the previous snapshots are not active. currentSnapshotInfo for + // AOS will be null. + previousOmSnapshots.add(currentSnapshotInfo == null + ? omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()) + : omSnapshotManager.getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName())); + previousSnapshotInfos.add(snapshotInfo); + } else { + previousOmSnapshots.add(null); + previousSnapshotInfos.add(null); + } + + // TODO: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames + // support. 
+ volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); + String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey(volume, bucket); + bucketInfo = ozoneManager.getMetadataManager().getBucketTable().get(dbBucketKey); + } + } else { + throw new IOException("Lock acquisition failed for last N snapshots : " + + expectedLastNSnapshotsInChain + " " + currentSnapshotInfo); + } + } catch (IOException e) { + this.close(); + throw e; + } + } + } + + @Override + public Boolean apply(Table.KeyValue keyValue) throws IOException { + String volume = getVolumeName(keyValue); + String bucket = getBucketName(keyValue); + initializePreviousSnapshotsFromChain(volume, bucket); + boolean isReclaimable = isReclaimable(keyValue); + // This is to ensure the reclamation ran on the same previous snapshot and no change occurred in the chain + // while processing the entry. + return isReclaimable && validateExistingLastNSnapshotsInChain(volume, bucket); + } + + protected abstract String getVolumeName(Table.KeyValue keyValue) throws IOException; + + protected abstract String getBucketName(Table.KeyValue keyValue) throws IOException; + + protected abstract Boolean isReclaimable(Table.KeyValue omKeyInfo) throws IOException; + + @Override + public void close() throws IOException { + this.snapshotIdLocks.releaseLock(); + for (ReferenceCounted previousOmSnapshot : previousOmSnapshots) { + previousOmSnapshot.close(); + } + previousOmSnapshots.clear(); + previousSnapshotInfos.clear(); + } + + public ReferenceCounted getPreviousOmSnapshot(int index) { + return previousOmSnapshots.get(index); + } + + public OMMetadataManager getMetadataManager() { + return metadataManager; + } + + public Long getVolumeId() { + return volumeId; + } + + public OmBucketInfo getBucketInfo() { + return bucketInfo; + } + + public SnapshotInfo getPreviousSnapshotInfo(int index) { + return previousSnapshotInfos.get(index); + } +} diff --git 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.hadoop.ozone.om.snapshot.filter;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshot;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.SnapshotChainManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS;
import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame;

/**
 * Filter to return deleted keys which are reclaimable based on their presence in the previous snapshot in
 * the snapshot chain. Looks back two snapshots (previous and previous-to-previous) so that, for keys that
 * are NOT reclaimable, the size exclusively held by the previous snapshot can be accumulated as a side
 * effect (see {@link #getExclusiveSizeMap()}).
 */
public class ReclaimableKeyFilter extends ReclaimableFilter {
  private final OzoneManager ozoneManager;
  // previousSnapshot tableKey -> data size that becomes exclusive to that snapshot; populated while filtering.
  private final Map exclusiveSizeMap;
  // Same keys as exclusiveSizeMap, but accumulating replicated (on-disk) size.
  private final Map exclusiveReplicatedSizeMap;

  /**
   * @param ozoneManager        active OzoneManager instance.
   * @param omSnapshotManager   manager used to open previous snapshot handles.
   * @param snapshotChainManager chain manager used to walk to previous snapshots.
   * @param currentSnapshotInfo if null the deleted keys in AOS need to be processed, hence the latest snapshot
   *                            in the snapshot chain corresponding to the bucket key needs to be processed.
   * @param metadataManager     MetadataManager corresponding to snapshot or AOS.
   * @param lock                lock for Active OM.
   */
  public ReclaimableKeyFilter(OzoneManager ozoneManager,
                              OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager,
                              SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager,
                              IOzoneManagerLock lock) {
    // numberOfPreviousSnapshotsFromChain = 2: previous and previous-to-previous are both needed here.
    super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 2);
    this.ozoneManager = ozoneManager;
    this.exclusiveSizeMap = new HashMap<>();
    this.exclusiveReplicatedSizeMap = new HashMap<>();
  }

  @Override
  protected String getVolumeName(Table.KeyValue keyValue) throws IOException {
    return keyValue.getValue().getVolumeName();
  }

  @Override
  protected String getBucketName(Table.KeyValue keyValue) throws IOException {
    return keyValue.getValue().getBucketName();
  }

  /**
   * A deleted key is reclaimable when no previous snapshot still references the same object (same objectID
   * and same block locations). When it is not reclaimable, the exclusive-size maps are updated as a side
   * effect.
   */
  @Override
  protected Boolean isReclaimable(Table.KeyValue deletedKeyInfo) throws IOException {
    // Index 1 is the immediate previous snapshot, index 0 the one before it (chain order, see base class).
    ReferenceCounted previousSnapshot = getPreviousOmSnapshot(1);
    ReferenceCounted previousToPreviousSnapshot = getPreviousOmSnapshot(0);

    Table previousKeyTable = null;
    Table previousPrevKeyTable = null;

    Table renamedTable = getMetadataManager().getSnapshotRenamedTable();
    Table prevRenamedTable = null;

    SnapshotInfo previousSnapshotInfo = getPreviousSnapshotInfo(1);
    SnapshotInfo prevPrevSnapshotInfo = getPreviousSnapshotInfo(0);

    if (previousSnapshot != null) {
      previousKeyTable = previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout());
      prevRenamedTable = previousSnapshot.get().getMetadataManager().getSnapshotRenamedTable();
    }
    if (previousToPreviousSnapshot != null) {
      previousPrevKeyTable = previousToPreviousSnapshot.get().getMetadataManager()
          .getKeyTable(getBucketInfo().getBucketLayout());
    }
    if (isKeyReclaimable(previousKeyTable, renamedTable, deletedKeyInfo.getValue(),
        getBucketInfo(), getVolumeId(),
        null)) {
      return true;
    }
    // Key is still referenced by the previous snapshot: charge its size to that snapshot's exclusive totals
    // (only if the previous-to-previous snapshot does NOT also reference it).
    calculateExclusiveSize(previousSnapshotInfo, prevPrevSnapshotInfo, deletedKeyInfo.getValue(), getBucketInfo(),
        getVolumeId(), renamedTable, previousKeyTable, prevRenamedTable, previousPrevKeyTable, exclusiveSizeMap,
        exclusiveReplicatedSizeMap);
    return false;
  }

  /** Returns the accumulated exclusive data sizes keyed by previous-snapshot table key. */
  public Map getExclusiveSizeMap() {
    return exclusiveSizeMap;
  }

  /** Returns the accumulated exclusive replicated sizes keyed by previous-snapshot table key. */
  public Map getExclusiveReplicatedSizeMap() {
    return exclusiveReplicatedSizeMap;
  }

  /**
   * Returns true when the deleted key is not referenced by the previous snapshot (neither directly nor via
   * a rename entry) and can therefore be reclaimed.
   *
   * @param renamedKeyBuilder if non-null and a rename entry exists, populated with the rename key/value.
   */
  private boolean isKeyReclaimable(
      Table previousKeyTable,
      Table renamedTable,
      OmKeyInfo deletedKeyInfo, OmBucketInfo bucketInfo,
      long volumeId, HddsProtos.KeyValue.Builder renamedKeyBuilder)
      throws IOException {

    String dbKey;
    // Handle case when the deleted snapshot is the first snapshot.
    if (previousKeyTable == null) {
      return true;
    }

    // These are uncommitted blocks wrapped into a pseudo KeyInfo
    if (deletedKeyInfo.getObjectID() == OBJECT_ID_RECLAIM_BLOCKS) {
      return true;
    }

    // Construct keyTable or fileTable DB key depending on the bucket type
    if (bucketInfo.getBucketLayout().isFileSystemOptimized()) {
      dbKey = ozoneManager.getMetadataManager().getOzonePathKey(
          volumeId,
          bucketInfo.getObjectID(),
          deletedKeyInfo.getParentObjectID(),
          deletedKeyInfo.getFileName());
    } else {
      dbKey = ozoneManager.getMetadataManager().getOzoneKey(
          deletedKeyInfo.getVolumeName(),
          deletedKeyInfo.getBucketName(),
          deletedKeyInfo.getKeyName());
    }

    /*
     snapshotRenamedTable:
     1) /volumeName/bucketName/objectID ->
                 /volumeId/bucketId/parentId/fileName (FSO)
     2) /volumeName/bucketName/objectID ->
                /volumeName/bucketName/keyName (non-FSO)
    */
    String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey(
        deletedKeyInfo.getVolumeName(), deletedKeyInfo.getBucketName(),
        deletedKeyInfo.getObjectID());

    // Condition: key should not exist in snapshotRenamedTable
    // of the current snapshot and keyTable of the previous snapshot.
    // Check key exists in renamedTable of the Snapshot
    String renamedKey = renamedTable.getIfExist(dbRenameKey);

    if (renamedKey != null && renamedKeyBuilder != null) {
      renamedKeyBuilder.setKey(dbRenameKey).setValue(renamedKey);
    }
    // previousKeyTable is fileTable if the bucket is FSO,
    // otherwise it is the keyTable.
    OmKeyInfo prevKeyInfo = renamedKey != null ? previousKeyTable
        .get(renamedKey) : previousKeyTable.get(dbKey);

    if (prevKeyInfo == null ||
        prevKeyInfo.getObjectID() != deletedKeyInfo.getObjectID()) {
      return true;
    }

    // For key overwrite the objectID will remain the same, In this
    // case we need to check if OmKeyLocationInfo is also same.
    return !isBlockLocationInfoSame(prevKeyInfo, deletedKeyInfo);
  }

  /**
   * To calculate exclusive size for the current snapshot, check
   * the next snapshot's deletedTable: if the deleted key is
   * referenced in the current snapshot and not referenced in the
   * previous snapshot then that key is exclusive to the current
   * snapshot. Here since we are only iterating through the
   * deletedTable we can check the previous and previous-to-previous
   * snapshot to achieve the same.
   * previousSnapshot - snapshot for which exclusive size is
   * being calculated.
   * currSnapshot - snapshot whose deletedTable is used to calculate
   * previousSnapshot's exclusive size.
   * previousToPrevSnapshot - snapshot which is used to check
   * if the key is exclusive to previousSnapshot.
   */
  @SuppressWarnings("checkstyle:ParameterNumber")
  public void calculateExclusiveSize(
      SnapshotInfo previousSnapshot,
      SnapshotInfo previousToPrevSnapshot,
      OmKeyInfo keyInfo,
      OmBucketInfo bucketInfo, long volumeId,
      Table snapRenamedTable,
      Table previousKeyTable,
      Table prevRenamedTable,
      Table previousToPrevKeyTable,
      Map exclusiveSizes,
      Map exclusiveReplicatedSizes) throws IOException {
    String prevSnapKey = previousSnapshot.getTableKey();
    long exclusiveReplicatedSize = exclusiveReplicatedSizes.getOrDefault(
        prevSnapKey, 0L) + keyInfo.getReplicatedSize();
    long exclusiveSize = exclusiveSizes.getOrDefault(prevSnapKey, 0L) + keyInfo.getDataSize();

    // If there is no previous to previous snapshot, then
    // the previous snapshot is the first snapshot.
    if (previousToPrevSnapshot == null) {
      exclusiveSizes.put(prevSnapKey, exclusiveSize);
      exclusiveReplicatedSizes.put(prevSnapKey,
          exclusiveReplicatedSize);
    } else {
      OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName(
          keyInfo, bucketInfo, volumeId,
          snapRenamedTable, previousKeyTable);
      OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName(
          keyInfoPrevSnapshot, bucketInfo, volumeId,
          prevRenamedTable, previousToPrevKeyTable);
      // If the previous to previous snapshot doesn't
      // have the key, then it is exclusive size for the
      // previous snapshot.
      if (keyInfoPrevToPrevSnapshot == null) {
        exclusiveSizes.put(prevSnapKey, exclusiveSize);
        exclusiveReplicatedSizes.put(prevSnapKey,
            exclusiveReplicatedSize);
      }
    }
  }

  /**
   * Looks up the given key in the previous snapshot's key table (resolving renames via snapRenamedTable)
   * and returns the previous snapshot's OmKeyInfo when it refers to the same object with the same block
   * locations; returns null otherwise (including when keyInfo itself is null).
   */
  private OmKeyInfo getPreviousSnapshotKeyName(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId,
      Table snapRenamedTable, Table previousKeyTable) throws IOException {

    if (keyInfo == null) {
      return null;
    }

    String dbKeyPrevSnap;
    if (bucketInfo.getBucketLayout().isFileSystemOptimized()) {
      dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzonePathKey(
          volumeId,
          bucketInfo.getObjectID(),
          keyInfo.getParentObjectID(),
          keyInfo.getFileName());
    } else {
      dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzoneKey(
          keyInfo.getVolumeName(),
          keyInfo.getBucketName(),
          keyInfo.getKeyName());
    }

    String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey(
        keyInfo.getVolumeName(),
        keyInfo.getBucketName(),
        keyInfo.getObjectID());

    String renamedKey = snapRenamedTable.getIfExist(dbRenameKey);
    OmKeyInfo prevKeyInfo = renamedKey != null ? previousKeyTable.get(renamedKey) : previousKeyTable.get(dbKeyPrevSnap);

    if (prevKeyInfo == null || prevKeyInfo.getObjectID() != keyInfo.getObjectID()) {
      return null;
    }

    return isBlockLocationInfoSame(prevKeyInfo, keyInfo) ? prevKeyInfo : null;
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.hadoop.ozone.om.snapshot.filter;

import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshot;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.SnapshotChainManager;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;

import java.io.IOException;

/**
 * Filter to return rename table entries which are reclaimable based on the key presence in the previous
 * snapshot's keyTable/directoryTable in the snapshot chain. A rename entry is reclaimable when the renamed
 * object no longer exists in the previous snapshot.
 */
public class ReclaimableRenameEntryFilter extends ReclaimableFilter {

  /**
   * @param ozoneManager        active OzoneManager instance.
   * @param omSnapshotManager   manager used to open previous snapshot handles.
   * @param snapshotChainManager chain manager used to walk to previous snapshots.
   * @param currentSnapshotInfo if null the rename entries in AOS need to be processed, hence the latest
   *                            snapshot in the snapshot chain corresponding to the bucket key needs to be
   *                            processed.
   * @param metadataManager     MetadataManager corresponding to snapshot or AOS.
   * @param lock                lock for Active OM.
   */
  public ReclaimableRenameEntryFilter(OzoneManager ozoneManager,
                                      OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager,
                                      SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager,
                                      IOzoneManagerLock lock) {
    // Only the immediate previous snapshot is needed for rename-entry reclamation.
    super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 1);
  }

  @Override
  protected Boolean isReclaimable(Table.KeyValue renameEntry) throws IOException {
    ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0);
    Table previousKeyTable = null;
    Table prevDirTable = null;
    if (previousSnapshot != null) {
      previousKeyTable = previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout());
      prevDirTable = previousSnapshot.get().getMetadataManager().getDirectoryTable();
    }
    return isRenameEntryReclaimable(renameEntry, prevDirTable, previousKeyTable);
  }

  @Override
  protected String getVolumeName(Table.KeyValue keyValue) throws IOException {
    // Rename table key format: /volumeName/bucketName/objectID.
    return getMetadataManager().splitRenameKey(keyValue.getKey())[0];
  }

  @Override
  protected String getBucketName(Table.KeyValue keyValue) throws IOException {
    return getMetadataManager().splitRenameKey(keyValue.getKey())[1];
  }

  /**
   * Returns true when the rename entry's target (value = previous snapshot's DB key) is present in neither
   * the previous snapshot's directory table nor its key table. Null tables mean there is no previous
   * snapshot, in which case the entry is trivially reclaimable.
   */
  private boolean isRenameEntryReclaimable(Table.KeyValue renameEntry,
                                           Table previousDirTable,
                                           Table prevKeyInfoTable) throws IOException {

    if (previousDirTable == null && prevKeyInfoTable == null) {
      return true;
    }
    String prevDbKey = renameEntry.getValue();

    if (previousDirTable != null) {
      OmDirectoryInfo prevDirectoryInfo = previousDirTable.getIfExist(prevDbKey);
      if (prevDirectoryInfo != null) {
        return false;
      }
    }

    if (prevKeyInfoTable != null) {
      OmKeyInfo omKeyInfo = prevKeyInfoTable.getIfExist(prevDbKey);
      return omKeyInfo == null;
    }
    return true;
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.apache.hadoop.ozone.om.lock;

import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

/**
 * Class to test class MultiLocks.
 */
@ExtendWith(MockitoExtension.class)
public class TestMultiLocks {
  @Mock
  private IOzoneManagerLock mockLock;

  @Mock
  private OzoneManagerLock.Resource mockResource;

  private MultiLocks multiLocks;

  @BeforeEach
  void setUp() {
    // Initialize MultiLocks with mock dependencies (write locks).
    multiLocks = new MultiLocks<>(mockLock, mockResource, true);
  }

  @Test
  void testAcquireLockSuccess() throws Exception {
    List objects = Arrays.asList("obj1", "obj2");
    OMLockDetails mockLockDetails = mock(OMLockDetails.class);
    when(mockLockDetails.isLockAcquired()).thenReturn(true);

    // Simulate successful lock acquisition for each object
    when(mockLock.acquireWriteLock(any(), anyString())).thenReturn(mockLockDetails);

    OMLockDetails result = multiLocks.acquireLock(objects);

    assertEquals(mockLockDetails, result);
    // NOTE: when one argument uses a matcher, every argument must — wrap the raw value with eq().
    verify(mockLock, times(2)).acquireWriteLock(ArgumentMatchers.eq(mockResource), anyString());
  }

  @Test
  void testAcquireLockFailureReleasesAll() throws Exception {
    List objects = Arrays.asList("obj1", "obj2");
    OMLockDetails failedLockDetails = mock(OMLockDetails.class);
    when(failedLockDetails.isLockAcquired()).thenReturn(false);

    // Simulate failure during lock acquisition
    when(mockLock.acquireWriteLock(mockResource, "obj1")).thenReturn(failedLockDetails);

    OMLockDetails result = multiLocks.acquireLock(objects);

    assertEquals(failedLockDetails, result);
    verify(mockLock).acquireWriteLock(mockResource, "obj1");
    verify(mockLock, never()).acquireWriteLock(mockResource, "obj2"); // No further lock attempt

    // Verify releaseLock() behavior
    verify(mockLock).releaseWriteLock(mockResource, "obj1");
  }

  @Test
  void testReleaseLock() throws Exception {
    List objects = Arrays.asList("obj1", "obj2");
    OMLockDetails mockLockDetails = mock(OMLockDetails.class);
    when(mockLockDetails.isLockAcquired()).thenReturn(true);

    // Acquire locks first
    when(mockLock.acquireWriteLock(any(), anyString())).thenReturn(mockLockDetails);
    multiLocks.acquireLock(objects);

    // Now release locks
    multiLocks.releaseLock();

    // Verify that locks are released in order
    verify(mockLock).releaseWriteLock(mockResource, "obj1");
    verify(mockLock).releaseWriteLock(mockResource, "obj2");
  }

  @Test
  void testAcquireLockWhenAlreadyAcquiredThrowsException() throws Exception {
    List objects = Collections.singletonList("obj1");
    OMLockDetails mockLockDetails = mock(OMLockDetails.class);
    when(mockLockDetails.isLockAcquired()).thenReturn(true);

    // Acquire a lock first
    when(mockLock.acquireWriteLock(any(), anyString())).thenReturn(mockLockDetails);
    multiLocks.acquireLock(objects);

    // Try acquiring locks again without releasing
    OMException exception = assertThrows(OMException.class, () -> multiLocks.acquireLock(objects));

    assertEquals("More locks cannot be acquired when locks have been already acquired. Locks acquired : [obj1]",
        exception.getMessage());
  }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * Test class for ReclaimableFilter. + */ +public class TestReclaimableFilter { + + @Test + public void testReclaimableFilter() { + + } + +} From 80713fd91730f535f148d401c92f00625000002a Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Jan 2025 07:50:25 -0800 Subject: [PATCH 02/14] HDDS-11603. Fix checkstyle Change-Id: I5dbb017343b5a666c56d53af2839dcb340fdf0ba --- .../hadoop/ozone/om/lock/TestMultiLocks.java | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java index c6c6f7c91b16..b3f33a6c35a5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java @@ -19,13 +19,27 @@ * */ +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mock; -import org.mockito.MockitoAnnotations; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** - * Class to test class MultiLocks + * Class to test class MultiLocks. */ public class TestMultiLocks { @Mock @@ -105,12 +119,7 @@ void testAcquireLockWhenAlreadyAcquiredThrowsException() throws Exception { multiLocks.acquireLock(objects); // Try acquiring locks again without releasing - OMException exception = assertThrows(OMException.class, new Executable() { - @Override - public void execute() throws Throwable { - multiLocks.acquireLock(objects); - } - }); + OMException exception = assertThrows(OMException.class, () -> multiLocks.acquireLock(objects)); assertEquals("More locks cannot be acquired when locks have been already acquired. Locks acquired : [obj1]", exception.getMessage()); From dcfec3ddbf9f8550a5f48974e1ec8038e2cda735 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Jan 2025 08:00:02 -0800 Subject: [PATCH 03/14] HDDS-11603. 
Fix checkstyle Change-Id: I67de73438c7ebc37c3d6e44bfe6e09188fb537e1 --- .../org/apache/hadoop/ozone/om/lock/TestMultiLocks.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java index b3f33a6c35a5..fff52567decd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java @@ -22,7 +22,10 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentMatchers; import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; import java.util.Arrays; import java.util.Collections; @@ -41,6 +44,7 @@ /** * Class to test class MultiLocks. */ +@ExtendWith(MockitoExtension.class) public class TestMultiLocks { @Mock private IOzoneManagerLock mockLock; @@ -68,7 +72,7 @@ void testAcquireLockSuccess() throws Exception { OMLockDetails result = multiLocks.acquireLock(objects); assertEquals(mockLockDetails, result); - verify(mockLock, times(2)).acquireWriteLock(mockResource, anyString()); + verify(mockLock, times(2)).acquireWriteLock(ArgumentMatchers.eq(mockResource), anyString()); } @Test From 7096a1cedba0c704456c286500040fdc12e8dcc0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 20 Jan 2025 22:46:07 -0800 Subject: [PATCH 04/14] HDDS-11603. 
Fix javadoc Change-Id: If027d4ac4c5171ded3499b900909347d59406f36 --- .../main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 76bdd82ee362..fe755787d2eb 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -606,7 +606,7 @@ default String getOpenFileName(long volumeId, long bucketId, long parentObjectId String getRenameKey(String volume, String bucket, long objectID); /** - * Given renameKey, return the volume, bucket & objectID from the key. + * Given renameKey, return the volume, bucket and objectID from the key. */ String[] splitRenameKey(String renameKey); From 7514ac30c89382724a7cd1d4bc1ef5f7da8ed531 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 20 Jan 2025 23:17:38 -0800 Subject: [PATCH 05/14] HDDS-11603. 
Fix test Change-Id: I7fe6d4029abe5917e20bb82ae1b2494e2319da70 --- .../hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index dd7758ed5a8f..0d8978190184 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -49,7 +49,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; -import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.snapshot.SnapshotTestUtils.StubbedPersistentMap; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse.CancelMessage; @@ -804,10 +803,9 @@ public void testGenerateDiffReport() throws IOException { String bucketName = "buck"; String fromSnapName = "fs"; String toSnapName = "ts"; - try (MockedStatic - mockedSnapshotDeletingService = mockStatic( - SnapshotDeletingService.class)) { - mockedSnapshotDeletingService.when(() -> + try (MockedStatic + mockedSnapshotUtils = mockStatic(SnapshotUtils.class)) { + mockedSnapshotUtils.when(() -> SnapshotUtils.isBlockLocationInfoSame(any(OmKeyInfo.class), any(OmKeyInfo.class))) .thenAnswer(i -> { From 7c141fd9d186676ce177b68b067abeb35120301b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 18 Feb 2025 13:03:53 -0800 Subject: [PATCH 06/14] HDDS-11603. 
Fix checkstyle Change-Id: I7409c5f4b3ce256cdccfdf80aec0e439fc3fa48a --- .../ozone/util/CheckedExceptionOperation.java | 15 ++++--- .../ozone/om/OmMetadataManagerImpl.java | 2 +- .../hadoop/ozone/om/lock/MultiLocks.java | 18 ++++----- .../service/AbstractKeyDeletingService.java | 2 +- .../snapshot/filter/ReclaimableDirFilter.java | 18 ++++----- .../om/snapshot/filter/ReclaimableFilter.java | 30 +++++++------- .../snapshot/filter/ReclaimableKeyFilter.java | 28 +++++++------ .../filter/ReclaimableRenameEntryFilter.java | 18 ++++----- .../om/snapshot/filter/package-info.java | 15 ++++--- .../hadoop/ozone/om/lock/TestMultiLocks.java | 39 +++++++++---------- .../filter/TestReclaimableFilter.java | 22 +++++------ 11 files changed, 95 insertions(+), 112 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java index c1e664422054..ec6653bdef90 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,8 +13,8 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + package org.apache.hadoop.ozone.util; /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index d0de661add20..3a73081cea35 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -33,8 +33,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.service.SnapshotDeletingService.isBlockLocationInfoSame; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotDirExist; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java index d7fe35b6f0d3..1d21fbcfc490 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,15 +13,14 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package org.apache.hadoop.ozone.om.lock; -import org.apache.hadoop.ozone.om.exceptions.OMException; +package org.apache.hadoop.ozone.om.lock; import java.util.Collection; import java.util.LinkedList; import java.util.Queue; +import org.apache.hadoop.ozone.om.exceptions.OMException; /** * Class to take multiple locks on a resource. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 7b7d1e238639..7c827a1631b5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.service.SnapshotDeletingService.isBlockLocationInfoSame; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java index 3410d73c8148..35545e46df46 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,10 +13,11 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + package org.apache.hadoop.ozone.om.snapshot.filter; +import java.io.IOException; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -30,8 +30,6 @@ import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import java.io.IOException; - /** * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in * the snapshot chain. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 200a35c47724..15363ff8141d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,11 +13,18 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + package org.apache.hadoop.ozone.om.snapshot.filter; import com.google.common.collect.Lists; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -34,14 +40,6 @@ import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.util.CheckedExceptionOperation; -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; - /** * This class is responsible for opening last N snapshot given snapshot or AOS metadata manager by acquiring a lock. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java index 00c7d30de882..53b82cb44c39 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,10 +13,16 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ + package org.apache.hadoop.ozone.om.snapshot.filter; +import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -31,13 +36,6 @@ import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; -import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; - /** * Filter to return deleted keys which are reclaimable based on their presence in previous snapshot in * the snapshot chain. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java index 48b08ed5bf42..df4dbd3ea4c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,10 +13,11 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + package org.apache.hadoop.ozone.om.snapshot.filter; +import java.io.IOException; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -30,8 +30,6 @@ import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import java.io.IOException; - /** * Filter to return rename table entries which are reclaimable based on the key presence in previous snapshot's * keyTable/DirectoryTable in the snapshot chain. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java index 700f7b9c6d05..16cdda0b6548 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java @@ -1,11 +1,10 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -14,8 +13,8 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ + /** * Package containing filter to perform reclaimable check on snapshots. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java index fff52567decd..fbdebb752670 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java @@ -1,13 +1,10 @@ -package org.apache.hadoop.ozone.om.lock; - /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -16,20 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentMatchers; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; +package org.apache.hadoop.ozone.om.lock; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -41,6 +27,17 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + /** * Class to test class MultiLocks. 
*/ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index 327584b557e4..300cb5a991fd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -1,15 +1,10 @@ -package org.apache.hadoop.ozone.om.snapshot.filter; - -import org.junit.jupiter.api.Test; - /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * @@ -18,9 +13,12 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ +package org.apache.hadoop.ozone.om.snapshot.filter; + +import org.junit.jupiter.api.Test; + /** * Test class for ReclaimableFilter. 
*/ From acdaffe127a506e2f05fabc627d3e8140002f279 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 6 Mar 2025 02:37:55 -0800 Subject: [PATCH 07/14] HDDS-11603. Refactor code simplify logic Change-Id: Icd0bb1a62e0435f4329cd1c0abdab52dce13827e --- .../ozone/om/lock/IOzoneManagerLock.java | 14 ++ .../hadoop/ozone/om/lock/OmReadOnlyLock.java | 21 ++ .../ozone/om/lock/OzoneManagerLock.java | 120 +++++++++- .../ozone/om/lock/TestOzoneManagerLock.java | 33 +++ .../hadoop/ozone/om/lock/MultiLocks.java | 73 ------ .../ozone/om/snapshot/MultiSnapshotLocks.java | 77 +++++++ .../snapshot/filter/ReclaimableDirFilter.java | 26 +-- .../om/snapshot/filter/ReclaimableFilter.java | 7 +- .../snapshot/filter/ReclaimableKeyFilter.java | 218 ++++++------------ .../filter/TestMultiSnapshotLocks.java} | 69 +++--- 10 files changed, 383 insertions(+), 275 deletions(-) delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java rename hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/{lock/TestMultiLocks.java => snapshot/filter/TestMultiSnapshotLocks.java} (57%) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index fac864b2135a..f44bec25c55d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.om.lock; import com.google.common.annotations.VisibleForTesting; +import java.util.Collection; +import java.util.List; /** * Interface for OM Metadata locks. 
@@ -27,9 +29,15 @@ public interface IOzoneManagerLock { OMLockDetails acquireReadLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails acquireReadLocks(OzoneManagerLock.Resource resource, Collection resources); + + OMLockDetails acquireWriteLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails acquireWriteLocks(OzoneManagerLock.Resource resource, + Collection resources); + boolean acquireMultiUserLock(String firstUser, String secondUser); void releaseMultiUserLock(String firstUser, String secondUser); @@ -37,9 +45,15 @@ OMLockDetails acquireWriteLock(OzoneManagerLock.Resource resource, OMLockDetails releaseWriteLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails releaseWriteLocks(OzoneManagerLock.Resource resource, + Collection resources); + OMLockDetails releaseReadLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails releaseReadLocks(OzoneManagerLock.Resource resource, + Collection resources); + @VisibleForTesting int getReadHoldCount(OzoneManagerLock.Resource resource, String... resources); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java index b1b4296cba7d..059536fe0a58 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.om.lock.OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; import static org.apache.hadoop.ozone.om.lock.OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; +import java.util.Collection; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource; /** @@ -34,12 +35,22 @@ public OMLockDetails acquireReadLock(Resource resource, String... 
resources) { return EMPTY_DETAILS_LOCK_ACQUIRED; } + @Override + public OMLockDetails acquireReadLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_ACQUIRED; + } + @Override public OMLockDetails acquireWriteLock(Resource resource, String... resources) { return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } + @Override + public OMLockDetails acquireWriteLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + @Override public boolean acquireMultiUserLock(String firstUser, String secondUser) { return false; @@ -56,11 +67,21 @@ public OMLockDetails releaseWriteLock(Resource resource, return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } + @Override + public OMLockDetails releaseWriteLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + @Override public OMLockDetails releaseReadLock(Resource resource, String... resources) { return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } + @Override + public OMLockDetails releaseReadLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + @Override public int getReadHoldCount(Resource resource, String... 
resources) { return 0; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 0fa19a1e7a0e..4efdd22c0f26 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -27,14 +27,18 @@ import com.google.common.util.concurrent.Striped; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.EnumMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.CompositeKey; import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.ipc.ProcessingDetails.Timing; import org.apache.hadoop.ipc.Server; @@ -122,6 +126,12 @@ private Striped createStripeLock(Resource r, return SimpleStriped.readWriteLock(size, fair); } + private Iterable bulkGetLock(Resource resource, Collection keys) { + Striped striped = stripedLockByResource.get(resource); + return striped.bulkGet(keys.stream().filter(Objects::nonNull) + .map(CompositeKey::combineKeys).collect(Collectors.toList())); + } + private ReentrantReadWriteLock getLock(Resource resource, String... keys) { Striped striped = stripedLockByResource.get(resource); Object key = combineKeys(keys); @@ -150,6 +160,28 @@ public OMLockDetails acquireReadLock(Resource resource, String... keys) { return acquireLock(resource, true, keys); } + /** + * Acquire read locks on a list of resources. 
+ * + * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same + * thread acquiring lock again is allowed. + * + * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread + * acquiring lock again is not allowed. + * + * Special Note for USER_LOCK: Single thread can acquire single user lock/ + * multi user lock. But not both at the same time. + * @param resource - Type of the resource. + * @param keys - A list of Resource names on which user want to acquire locks. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + @Override + public OMLockDetails acquireReadLocks(Resource resource, Collection keys) { + return acquireLocks(resource, true, keys); + } + /** * Acquire write lock on resource. * @@ -172,8 +204,56 @@ public OMLockDetails acquireWriteLock(Resource resource, String... keys) { return acquireLock(resource, false, keys); } + /** + * Acquire write locks on a list of resources. + * + * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same + * thread acquiring lock again is allowed. + * + * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread + * acquiring lock again is not allowed. + * + * Special Note for USER_LOCK: Single thread can acquire single user lock/ + * multi user lock. But not both at the same time. + * @param resource - Type of the resource. + * @param keys - A list of Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. 
+ */ + @Override + public OMLockDetails acquireWriteLocks(Resource resource, Collection keys) { + return acquireLocks(resource, false, keys); + } + + private OMLockDetails acquireLocks(Resource resource, boolean isReadLock, + Collection keys) { + omLockDetails.get().clear(); + if (!resource.canLock(lockSet.get())) { + String errorMessage = getErrorMessage(resource); + LOG.error(errorMessage); + throw new RuntimeException(errorMessage); + } + + long startWaitingTimeNanos = Time.monotonicNowNanos(); + + for (ReadWriteLock lock : bulkGetLock(resource, keys)) { + if (isReadLock) { + lock.readLock().lock(); + updateReadLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); + } else { + lock.writeLock().lock(); + updateWriteLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); + } + } + + lockSet.set(resource.setLock(lockSet.get())); + omLockDetails.get().setLockAcquired(true); + return omLockDetails.get(); + } + private OMLockDetails acquireLock(Resource resource, boolean isReadLock, - String... keys) { + String[] keys) { omLockDetails.get().clear(); if (!resource.canLock(lockSet.get())) { String errorMessage = getErrorMessage(resource); @@ -317,6 +397,11 @@ public OMLockDetails releaseWriteLock(Resource resource, String... keys) { return releaseLock(resource, false, keys); } + @Override + public OMLockDetails releaseWriteLocks(Resource resource, Collection keys) { + return releaseLocks(resource, false, keys); + } + /** * Release read lock on resource. * @param resource - Type of the resource. @@ -330,6 +415,19 @@ public OMLockDetails releaseReadLock(Resource resource, String... keys) { return releaseLock(resource, true, keys); } + /** + * Release read locks on a list of resources. + * @param resource - Type of the resource. + * @param keys - Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. 
For remaining all resource only one param should + * be passed. + */ + @Override + public OMLockDetails releaseReadLocks(Resource resource, Collection keys) { + return releaseLocks(resource, true, keys); + } + private OMLockDetails releaseLock(Resource resource, boolean isReadLock, String... keys) { omLockDetails.get().clear(); @@ -347,6 +445,26 @@ private OMLockDetails releaseLock(Resource resource, boolean isReadLock, return omLockDetails.get(); } + private OMLockDetails releaseLocks(Resource resource, boolean isReadLock, + Collection keys) { + omLockDetails.get().clear(); + Iterable locks = bulkGetLock(resource, keys); + + for (ReadWriteLock lock : locks) { + if (isReadLock) { + lock.readLock().unlock(); + updateReadUnlockMetrics(resource, (ReentrantReadWriteLock) lock); + } else { + boolean isWriteLocked = ((ReentrantReadWriteLock)lock).isWriteLockedByCurrentThread(); + lock.writeLock().unlock(); + updateWriteUnlockMetrics(resource, (ReentrantReadWriteLock) lock, isWriteLocked); + } + } + + lockSet.set(resource.clearLock(lockSet.get())); + return omLockDetails.get(); + } + private void updateReadUnlockMetrics(Resource resource, ReentrantReadWriteLock lock) { /* diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 4cd44cba4b1a..7ae04e1be5da 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -25,6 +25,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Stack; import java.util.UUID; @@ -287,6 +289,37 @@ void testLockResourceParallel() throws Exception { } + @Test + void testMultiLocksResourceParallel() throws Exception { + 
OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); + + for (Resource resource : Resource.values()) { + final List resourceName = Arrays.asList(generateResourceName(resource), + generateResourceName(resource), generateResourceName(resource)); + lock.acquireWriteLocks(resource, resourceName.subList(1, resourceName.size())); + + AtomicBoolean gotLock = new AtomicBoolean(false); + new Thread(() -> { + lock.acquireWriteLocks(resource, resourceName.subList(0, 2)); + gotLock.set(true); + lock.releaseWriteLocks(resource, resourceName.subList(0, 2)); + }).start(); + // Let's give some time for the new thread to run + Thread.sleep(100); + // Since the new thread is trying to get lock on same resource, + // it will wait. + assertFalse(gotLock.get()); + lock.releaseWriteLocks(resource, resourceName.subList(1, resourceName.size())); + // Since we have released the lock, the new thread should have the lock + // now. + // Let's give some time for the new thread to run + Thread.sleep(100); + assertTrue(gotLock.get()); + } + + } + + @Test void testMultiLockResourceParallel() throws Exception { OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java deleted file mode 100644 index 1d21fbcfc490..000000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/MultiLocks.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -import java.util.Collection; -import java.util.LinkedList; -import java.util.Queue; -import org.apache.hadoop.ozone.om.exceptions.OMException; - -/** - * Class to take multiple locks on a resource. - */ -public class MultiLocks { - private final Queue objectLocks; - private final IOzoneManagerLock lock; - private final OzoneManagerLock.Resource resource; - private final boolean writeLock; - - public MultiLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource resource, boolean writeLock) { - this.writeLock = writeLock; - this.resource = resource; - this.lock = lock; - this.objectLocks = new LinkedList<>(); - } - - public OMLockDetails acquireLock(Collection objects) throws OMException { - if (!objectLocks.isEmpty()) { - throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : " - + objectLocks, OMException.ResultCodes.INTERNAL_ERROR); - } - OMLockDetails omLockDetails = OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; - for (T object : objects) { - if (object != null) { - omLockDetails = this.writeLock ? 
lock.acquireWriteLock(resource, object.toString()) - : lock.acquireReadLock(resource, object.toString()); - objectLocks.add(object); - if (!omLockDetails.isLockAcquired()) { - break; - } - } - } - if (!omLockDetails.isLockAcquired()) { - releaseLock(); - } - return omLockDetails; - } - - public void releaseLock() { - while (!objectLocks.isEmpty()) { - T object = objectLocks.poll(); - if (this.writeLock) { - lock.releaseWriteLock(resource, object.toString()); - } else { - lock.releaseReadLock(resource, object.toString()); - } - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java new file mode 100644 index 000000000000..e80df3a60087 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; + +/** + * Class to take multiple locks on multiple snapshots. + */ +public class MultiSnapshotLocks { + private final List objectLocks; + private final IOzoneManagerLock lock; + private final OzoneManagerLock.Resource resource; + private final boolean writeLock; + + public MultiSnapshotLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource resource, boolean writeLock) { + this.writeLock = writeLock; + this.resource = resource; + this.lock = lock; + this.objectLocks = new ArrayList<>(); + } + + public OMLockDetails acquireLock(Collection ids) throws OMException { + if (!objectLocks.isEmpty()) { + throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : " + + objectLocks.stream().map(Arrays::toString).collect(Collectors.toList()), + OMException.ResultCodes.INTERNAL_ERROR); + } + List keys = ids.stream().map(id -> new String[] {id.toString()}).collect(Collectors.toList()); + OMLockDetails omLockDetails = this.writeLock ? 
lock.acquireWriteLocks(resource, keys) : + lock.acquireReadLocks(resource, keys); + if (omLockDetails.isLockAcquired()) { + objectLocks.addAll(keys); + } + return omLockDetails; + } + + public void releaseLock() { + if (this.writeLock) { + lock.releaseWriteLocks(resource, this.objectLocks); + } else { + lock.releaseReadLocks(resource, this.objectLocks); + } + this.objectLocks.clear(); + } + + @VisibleForTesting + public List getObjectLocks() { + return objectLocks; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java index 35545e46df46..d408ebbe80fc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -72,43 +73,34 @@ protected Boolean isReclaimable(Table.KeyValue deletedDirInfo ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0); Table prevDirTable = previousSnapshot == null ? 
null : previousSnapshot.get().getMetadataManager().getDirectoryTable(); - return isDirReclaimable(deletedDirInfo, prevDirTable, + return isDirReclaimable(deletedDirInfo.getValue(), getVolumeId(), getBucketInfo(), prevDirTable, getMetadataManager().getSnapshotRenamedTable()); } - private boolean isDirReclaimable(Table.KeyValue deletedDir, + private boolean isDirReclaimable(OmKeyInfo dirInfo, + long volumeId, OmBucketInfo bucketInfo, Table previousDirTable, Table renamedTable) throws IOException { if (previousDirTable == null) { return true; } - - String deletedDirDbKey = deletedDir.getKey(); - OmKeyInfo deletedDirInfo = deletedDir.getValue(); String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( - deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), - deletedDirInfo.getObjectID()); + dirInfo.getVolumeName(), dirInfo.getBucketName(), dirInfo.getObjectID()); /* snapshotRenamedTable: /volumeName/bucketName/objectID -> /volumeId/bucketId/parentId/dirName */ String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); - String prevDbKey = null; - + String prevDbKey; if (dbKeyBeforeRename != null) { prevDbKey = dbKeyBeforeRename; } else { - // In OMKeyDeleteResponseWithFSO OzonePathKey is converted to - // OzoneDeletePathKey. Changing it back to check the previous DirTable. 
- prevDbKey = ozoneManager.getMetadataManager() - .getOzoneDeletePathDirKey(deletedDirDbKey); + prevDbKey = ozoneManager.getMetadataManager().getOzonePathKey( + volumeId, bucketInfo.getObjectID(), dirInfo.getParentObjectID(), dirInfo.getFileName()); } OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); - if (prevDirectoryInfo == null) { - return true; - } - return prevDirectoryInfo.getObjectID() != deletedDirInfo.getObjectID(); + return prevDirectoryInfo == null || prevDirectoryInfo.getObjectID() != dirInfo.getObjectID(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 15363ff8141d..7e445051f29d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -34,8 +34,8 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.MultiLocks; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.util.CheckedExceptionOperation; @@ -53,7 +53,7 @@ public abstract class ReclaimableFilter implements CheckedExceptionOperation< private final List previousSnapshotInfos; private final List> previousOmSnapshots; - private final MultiLocks snapshotIdLocks; + private final MultiSnapshotLocks snapshotIdLocks; private Long volumeId; private OmBucketInfo bucketInfo; private final OMMetadataManager metadataManager; @@ -79,7 +79,7 @@ public 
ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshot this.omSnapshotManager = omSnapshotManager; this.currentSnapshotInfo = currentSnapshotInfo; this.snapshotChainManager = snapshotChainManager; - this.snapshotIdLocks = new MultiLocks<>(lock, OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK, false); + this.snapshotIdLocks = new MultiSnapshotLocks(lock, OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK, false); this.metadataManager = metadataManager; this.numberOfPreviousSnapshotsFromChain = numberOfPreviousSnapshotsFromChain; this.previousOmSnapshots = new ArrayList<>(numberOfPreviousSnapshotsFromChain); @@ -217,4 +217,5 @@ public OmBucketInfo getBucketInfo() { public SnapshotInfo getPreviousSnapshotInfo(int index) { return previousSnapshotInfos.get(index); } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java index 53b82cb44c39..6618b864c50c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java @@ -17,13 +17,14 @@ package org.apache.hadoop.ozone.om.snapshot.filter; -import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.isBlockLocationInfoSame; import java.io.IOException; import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -35,6 +36,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import 
org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.ratis.util.MemoizedCheckedSupplier; /** * Filter to return deleted keys which are reclaimable based on their presence in previous snapshot in @@ -42,8 +44,8 @@ */ public class ReclaimableKeyFilter extends ReclaimableFilter { private final OzoneManager ozoneManager; - private final Map exclusiveSizeMap; - private final Map exclusiveReplicatedSizeMap; + private final Map exclusiveSizeMap; + private final Map exclusiveReplicatedSizeMap; /** * @param omSnapshotManager @@ -78,109 +80,49 @@ protected Boolean isReclaimable(Table.KeyValue deletedKeyInfo ReferenceCounted previousSnapshot = getPreviousOmSnapshot(1); ReferenceCounted previousToPreviousSnapshot = getPreviousOmSnapshot(0); - Table previousKeyTable = null; - Table previousPrevKeyTable = null; + AtomicReference> previousKeyTable = new AtomicReference<>(); Table renamedTable = getMetadataManager().getSnapshotRenamedTable(); - Table prevRenamedTable = null; - - SnapshotInfo previousSnapshotInfo = getPreviousSnapshotInfo(1); - SnapshotInfo prevPrevSnapshotInfo = getPreviousSnapshotInfo(0); + AtomicReference> prevRenamedTable = new AtomicReference<>(); if (previousSnapshot != null) { - previousKeyTable = previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout()); - prevRenamedTable = previousSnapshot.get().getMetadataManager().getSnapshotRenamedTable(); - } - if (previousToPreviousSnapshot != null) { - previousPrevKeyTable = previousToPreviousSnapshot.get().getMetadataManager() - .getKeyTable(getBucketInfo().getBucketLayout()); + previousKeyTable.set(previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout())); + prevRenamedTable.set(previousSnapshot.get().getMetadataManager().getSnapshotRenamedTable()); } - if (isKeyReclaimable(previousKeyTable, renamedTable, deletedKeyInfo.getValue(), - getBucketInfo(), getVolumeId(), - 
null)) { + + // Getting keyInfo from prev snapshot's keyTable/fileTable + MemoizedCheckedSupplier, IOException> previousKeyInfo = + MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(deletedKeyInfo.getValue(), getBucketInfo(), + getVolumeId(), renamedTable, previousKeyTable.get())); + // If file not present in previous snapshot then it won't be present in previous to previous snapshot either. + if (!previousKeyInfo.get().isPresent()) { return true; } - calculateExclusiveSize(previousSnapshotInfo, prevPrevSnapshotInfo, deletedKeyInfo.getValue(), getBucketInfo(), - getVolumeId(), renamedTable, previousKeyTable, prevRenamedTable, previousPrevKeyTable, exclusiveSizeMap, - exclusiveReplicatedSizeMap); + + AtomicReference> previousPrevKeyTable = new AtomicReference<>(); + if (previousToPreviousSnapshot != null) { + previousPrevKeyTable.set(previousToPreviousSnapshot.get().getMetadataManager() + .getKeyTable(getBucketInfo().getBucketLayout())); + } + // Getting keyInfo from prev to prev snapshot's keyTable/fileTable based on keyInfo of prev keyTable + MemoizedCheckedSupplier, IOException> previousPrevKeyInfo = + MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(previousKeyInfo.get().orElse(null), + getBucketInfo(), getVolumeId(), prevRenamedTable.get(), previousPrevKeyTable.get())); + SnapshotInfo prevToPrevSnapshotInfo = getPreviousSnapshotInfo(0); + calculateExclusiveSize(prevToPrevSnapshotInfo, previousPrevKeyInfo.get().orElse(null), + exclusiveSizeMap, exclusiveReplicatedSizeMap); return false; } - public Map getExclusiveSizeMap() { + public Map getExclusiveSizeMap() { return exclusiveSizeMap; } - public Map getExclusiveReplicatedSizeMap() { + public Map getExclusiveReplicatedSizeMap() { return exclusiveReplicatedSizeMap; } - private boolean isKeyReclaimable( - Table previousKeyTable, - Table renamedTable, - OmKeyInfo deletedKeyInfo, OmBucketInfo bucketInfo, - long volumeId, HddsProtos.KeyValue.Builder renamedKeyBuilder) - throws IOException { - - 
String dbKey; - // Handle case when the deleted snapshot is the first snapshot. - if (previousKeyTable == null) { - return true; - } - - // These are uncommitted blocks wrapped into a pseudo KeyInfo - if (deletedKeyInfo.getObjectID() == OBJECT_ID_RECLAIM_BLOCKS) { - return true; - } - - // Construct keyTable or fileTable DB key depending on the bucket type - if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { - dbKey = ozoneManager.getMetadataManager().getOzonePathKey( - volumeId, - bucketInfo.getObjectID(), - deletedKeyInfo.getParentObjectID(), - deletedKeyInfo.getFileName()); - } else { - dbKey = ozoneManager.getMetadataManager().getOzoneKey( - deletedKeyInfo.getVolumeName(), - deletedKeyInfo.getBucketName(), - deletedKeyInfo.getKeyName()); - } - - /* - snapshotRenamedTable: - 1) /volumeName/bucketName/objectID -> - /volumeId/bucketId/parentId/fileName (FSO) - 2) /volumeName/bucketName/objectID -> - /volumeName/bucketName/keyName (non-FSO) - */ - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( - deletedKeyInfo.getVolumeName(), deletedKeyInfo.getBucketName(), - deletedKeyInfo.getObjectID()); - - // Condition: key should not exist in snapshotRenamedTable - // of the current snapshot and keyTable of the previous snapshot. - // Check key exists in renamedTable of the Snapshot - String renamedKey = renamedTable.getIfExist(dbRenameKey); - - if (renamedKey != null && renamedKeyBuilder != null) { - renamedKeyBuilder.setKey(dbRenameKey).setValue(renamedKey); - } - // previousKeyTable is fileTable if the bucket is FSO, - // otherwise it is the keyTable. - OmKeyInfo prevKeyInfo = renamedKey != null ? previousKeyTable - .get(renamedKey) : previousKeyTable.get(dbKey); - - if (prevKeyInfo == null || - prevKeyInfo.getObjectID() != deletedKeyInfo.getObjectID()) { - return true; - } - - // For key overwrite the objectID will remain the same, In this - // case we need to check if OmKeyLocationInfo is also same. 
- return !isBlockLocationInfoSame(prevKeyInfo, deletedKeyInfo); - } - /** * To calculate Exclusive Size for current snapshot, Check * the next snapshot deletedTable if the deleted key is @@ -196,80 +138,54 @@ private boolean isKeyReclaimable( * previousToPrevSnapshot - Snapshot which is used to check * if key is exclusive to previousSnapshot. */ - @SuppressWarnings("checkstyle:ParameterNumber") - public void calculateExclusiveSize( - SnapshotInfo previousSnapshot, - SnapshotInfo previousToPrevSnapshot, - OmKeyInfo keyInfo, - OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, - Table previousKeyTable, - Table prevRenamedTable, - Table previousToPrevKeyTable, - Map exclusiveSizes, - Map exclusiveReplicatedSizes) throws IOException { - String prevSnapKey = previousSnapshot.getTableKey(); - long exclusiveReplicatedSize = exclusiveReplicatedSizes.getOrDefault( - prevSnapKey, 0L) + keyInfo.getReplicatedSize(); - long exclusiveSize = exclusiveSizes.getOrDefault(prevSnapKey, 0L) + keyInfo.getDataSize(); - - // If there is no previous to previous snapshot, then - // the previous snapshot is the first snapshot. - if (previousToPrevSnapshot == null) { - exclusiveSizes.put(prevSnapKey, exclusiveSize); - exclusiveReplicatedSizes.put(prevSnapKey, - exclusiveReplicatedSize); - } else { - OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName( - keyInfo, bucketInfo, volumeId, - snapRenamedTable, previousKeyTable); - OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName( - keyInfoPrevSnapshot, bucketInfo, volumeId, - prevRenamedTable, previousToPrevKeyTable); - // If the previous to previous snapshot doesn't - // have the key, then it is exclusive size for the - // previous snapshot. 
- if (keyInfoPrevToPrevSnapshot == null) { - exclusiveSizes.put(prevSnapKey, exclusiveSize); - exclusiveReplicatedSizes.put(prevSnapKey, - exclusiveReplicatedSize); - } + private void calculateExclusiveSize(SnapshotInfo prevToPrevSnapKey, OmKeyInfo keyInfoPrevToPrevSnapshot, + Map exclusiveSizes, Map exclusiveReplicatedSizes) { + if (keyInfoPrevToPrevSnapshot == null) { + return; } + exclusiveSizes.compute(prevToPrevSnapKey.getSnapshotId(), + (k, v) -> (v == null ? 0 : v) + keyInfoPrevToPrevSnapshot.getDataSize()); + exclusiveReplicatedSizes.compute(prevToPrevSnapKey.getSnapshotId(), + (k, v) -> (v == null ? 0 : v) + keyInfoPrevToPrevSnapshot.getReplicatedSize()); } - private OmKeyInfo getPreviousSnapshotKeyName(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, Table previousKeyTable) throws IOException { + private Optional getPreviousSnapshotKey(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, + Table snapRenamedTable, + Table previousKeyTable) throws IOException { - if (keyInfo == null) { - return null; + if (keyInfo == null || previousKeyTable == null) { + return Optional.empty(); } - - String dbKeyPrevSnap; - if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { - dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzonePathKey( - volumeId, - bucketInfo.getObjectID(), - keyInfo.getParentObjectID(), - keyInfo.getFileName()); - } else { - dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzoneKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getKeyName()); - } - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getObjectID()); String renamedKey = snapRenamedTable.getIfExist(dbRenameKey); - OmKeyInfo prevKeyInfo = renamedKey != null ? 
previousKeyTable.get(renamedKey) : previousKeyTable.get(dbKeyPrevSnap); + OmKeyInfo prevKeyInfo; + + if (renamedKey == null) { + String dbKeyPrevSnap; + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzonePathKey( + volumeId, + bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), + keyInfo.getFileName()); + } else { + dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzoneKey( + keyInfo.getVolumeName(), + keyInfo.getBucketName(), + keyInfo.getKeyName()); + } + prevKeyInfo = previousKeyTable.get(dbKeyPrevSnap); + } else { + prevKeyInfo = previousKeyTable.get(renamedKey); + } if (prevKeyInfo == null || prevKeyInfo.getObjectID() != keyInfo.getObjectID()) { - return null; + return Optional.empty(); } - - return isBlockLocationInfoSame(prevKeyInfo, keyInfo) ? prevKeyInfo : null; + return isBlockLocationInfoSame(prevKeyInfo, keyInfo) ? Optional.of(prevKeyInfo) : Optional.empty(); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestMultiSnapshotLocks.java similarity index 57% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestMultiSnapshotLocks.java index fbdebb752670..bb147332a7f4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/lock/TestMultiLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestMultiSnapshotLocks.java @@ -15,14 +15,17 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om.lock; +package org.apache.hadoop.ozone.om.snapshot.filter; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -30,7 +33,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.UUID; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -42,87 +50,88 @@ * Class to test class MultiLocks. 
*/ @ExtendWith(MockitoExtension.class) -public class TestMultiLocks { +public class TestMultiSnapshotLocks { @Mock private IOzoneManagerLock mockLock; @Mock private OzoneManagerLock.Resource mockResource; - private MultiLocks multiLocks; + private MultiSnapshotLocks multiSnapshotLocks; + private UUID obj1 = UUID.randomUUID(); + private UUID obj2 = UUID.randomUUID(); @BeforeEach void setUp() { // Initialize MultiLocks with mock dependencies - multiLocks = new MultiLocks<>(mockLock, mockResource, true); + multiSnapshotLocks = new MultiSnapshotLocks(mockLock, mockResource, true); } @Test void testAcquireLockSuccess() throws Exception { - List objects = Arrays.asList("obj1", "obj2"); + List objects = Arrays.asList(obj1, obj2); OMLockDetails mockLockDetails = mock(OMLockDetails.class); when(mockLockDetails.isLockAcquired()).thenReturn(true); // Simulate successful lock acquisition for each object - when(mockLock.acquireWriteLock(any(), anyString())).thenReturn(mockLockDetails); + when(mockLock.acquireWriteLocks(eq(mockResource), anyList())).thenReturn(mockLockDetails); - OMLockDetails result = multiLocks.acquireLock(objects); + OMLockDetails result = multiSnapshotLocks.acquireLock(objects); assertEquals(mockLockDetails, result); - verify(mockLock, times(2)).acquireWriteLock(ArgumentMatchers.eq(mockResource), anyString()); + verify(mockLock, times(1)).acquireWriteLocks(ArgumentMatchers.eq(mockResource), any()); } @Test void testAcquireLockFailureReleasesAll() throws Exception { - List objects = Arrays.asList("obj1", "obj2"); + + List objects = Arrays.asList(obj1, obj2); OMLockDetails failedLockDetails = mock(OMLockDetails.class); when(failedLockDetails.isLockAcquired()).thenReturn(false); // Simulate failure during lock acquisition - when(mockLock.acquireWriteLock(mockResource, "obj1")).thenReturn(failedLockDetails); + when(mockLock.acquireWriteLocks(eq(mockResource), anyCollection())).thenReturn(failedLockDetails); - OMLockDetails result = 
multiLocks.acquireLock(objects); + OMLockDetails result = multiSnapshotLocks.acquireLock(objects); assertEquals(failedLockDetails, result); - verify(mockLock).acquireWriteLock(mockResource, "obj1"); - verify(mockLock, never()).acquireWriteLock(mockResource, "obj2"); // No further lock attempt - - // Verify releaseLock() behavior - verify(mockLock).releaseWriteLock(mockResource, "obj1"); + assertTrue(multiSnapshotLocks.getObjectLocks().isEmpty()); } @Test void testReleaseLock() throws Exception { - List objects = Arrays.asList("obj1", "obj2"); + List objects = Arrays.asList(obj1, obj2); OMLockDetails mockLockDetails = mock(OMLockDetails.class); when(mockLockDetails.isLockAcquired()).thenReturn(true); // Acquire locks first - when(mockLock.acquireWriteLock(any(), anyString())).thenReturn(mockLockDetails); - multiLocks.acquireLock(objects); + when(mockLock.acquireWriteLocks(eq(mockResource), anyCollection())).thenReturn(mockLockDetails); + multiSnapshotLocks.acquireLock(objects); + assertFalse(multiSnapshotLocks.getObjectLocks().isEmpty()); // Now release locks - multiLocks.releaseLock(); + multiSnapshotLocks.releaseLock(); // Verify that locks are released in order - verify(mockLock).releaseWriteLock(mockResource, "obj1"); - verify(mockLock).releaseWriteLock(mockResource, "obj2"); + verify(mockLock).releaseWriteLocks(eq(mockResource), any()); + assertTrue(multiSnapshotLocks.getObjectLocks().isEmpty()); } @Test void testAcquireLockWhenAlreadyAcquiredThrowsException() throws Exception { - List objects = Collections.singletonList("obj1"); + List objects = Collections.singletonList(obj1); OMLockDetails mockLockDetails = mock(OMLockDetails.class); when(mockLockDetails.isLockAcquired()).thenReturn(true); // Acquire a lock first - when(mockLock.acquireWriteLock(any(), anyString())).thenReturn(mockLockDetails); - multiLocks.acquireLock(objects); + when(mockLock.acquireWriteLocks(any(), anyList())).thenReturn(mockLockDetails); + multiSnapshotLocks.acquireLock(objects); // Try 
acquiring locks again without releasing - OMException exception = assertThrows(OMException.class, () -> multiLocks.acquireLock(objects)); + OMException exception = assertThrows(OMException.class, () -> multiSnapshotLocks.acquireLock(objects)); - assertEquals("More locks cannot be acquired when locks have been already acquired. Locks acquired : [obj1]", - exception.getMessage()); + assertEquals( + String.format("More locks cannot be acquired when locks have been already acquired. Locks acquired : [[%s]]", + obj1.toString()), exception.getMessage()); } } From b94a54f01af632e69c2431b87512b604a1534b29 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 6 Mar 2025 08:15:17 -0800 Subject: [PATCH 08/14] HDDS-11603. Fix checkstyle Change-Id: Ib8617005bbe1ac29835db8dcd6a27909cf08e57f --- .../java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java | 1 - .../org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java | 1 - .../org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java | 1 - 3 files changed, 3 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index f44bec25c55d..6926b7d9bf23 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -19,7 +19,6 @@ import com.google.common.annotations.VisibleForTesting; import java.util.Collection; -import java.util.List; /** * Interface for OM Metadata locks. 
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 7ae04e1be5da..ca986a84639d 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Stack; import java.util.UUID; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index e80df3a60087..1b5afb9e6669 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; From df8c562bd7886bd33844ad13d0fbf14dc00d6896 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 6 Mar 2025 09:15:59 -0800 Subject: [PATCH 09/14] HDDS-11603. 
Address review comments Change-Id: I8bab35cc6dfef7fcac5e69f806904049ff584e32 --- .../ozone/util/CheckedExceptionOperation.java | 7 ++----- .../ozone/om/snapshot/MultiSnapshotLocks.java | 4 +++- .../snapshot/filter/ReclaimableDirFilter.java | 14 +++----------- .../om/snapshot/filter/ReclaimableFilter.java | 18 ++++++++++-------- .../snapshot/filter/ReclaimableKeyFilter.java | 10 +++------- .../filter/ReclaimableRenameEntryFilter.java | 4 ---- 6 files changed, 21 insertions(+), 36 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java index ec6653bdef90..4e41e54f0993 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java @@ -22,13 +22,10 @@ * Represents a function that accepts one argument and produces a result. * This is a functional interface whose functional method is apply(Object). * Type parameters: - * – the type of the input to the function – the type of the result of the function + * – the type of the input to the function + * – the type of the result of the function * - the type of exception thrown. 
*/ public interface CheckedExceptionOperation { R apply(T t) throws E; - - default CheckedExceptionOperation andThen(CheckedExceptionOperation operation) { - return (T t) -> operation.apply(this.apply(t)); - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index 1b5afb9e6669..86ad8807493d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -22,6 +22,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -51,7 +52,8 @@ public OMLockDetails acquireLock(Collection ids) throws OMException { + objectLocks.stream().map(Arrays::toString).collect(Collectors.toList()), OMException.ResultCodes.INTERNAL_ERROR); } - List keys = ids.stream().map(id -> new String[] {id.toString()}).collect(Collectors.toList()); + List keys = + ids.stream().filter(Objects::nonNull).map(id -> new String[] {id.toString()}).collect(Collectors.toList()); OMLockDetails omLockDetails = this.writeLock ? 
lock.acquireWriteLocks(resource, keys) : lock.acquireReadLocks(resource, keys); if (omLockDetails.isLockAcquired()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java index d408ebbe80fc..863a7e33abb5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -37,14 +37,10 @@ */ public class ReclaimableDirFilter extends ReclaimableFilter { - private final OzoneManager ozoneManager; - /** * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in * the snapshot chain. * - * @param omSnapshotManager - * @param snapshotChainManager * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot * in the snapshot chain corresponding to bucket key needs to be processed. * @param metadataManager : MetadataManager corresponding to snapshot or AOS. 
@@ -55,7 +51,6 @@ public ReclaimableDirFilter(OzoneManager ozoneManager, SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, IOzoneManagerLock lock) { super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 1); - this.ozoneManager = ozoneManager; } @Override @@ -84,19 +79,16 @@ private boolean isDirReclaimable(OmKeyInfo dirInfo, if (previousDirTable == null) { return true; } - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( + String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( dirInfo.getVolumeName(), dirInfo.getBucketName(), dirInfo.getObjectID()); - /* - snapshotRenamedTable: /volumeName/bucketName/objectID -> - /volumeId/bucketId/parentId/dirName - */ + // snapshotRenamedTable: /volumeName/bucketName/objectID -> /volumeId/bucketId/parentId/dirName String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); String prevDbKey; if (dbKeyBeforeRename != null) { prevDbKey = dbKeyBeforeRename; } else { - prevDbKey = ozoneManager.getMetadataManager().getOzonePathKey( + prevDbKey = getOzoneManager().getMetadataManager().getOzonePathKey( volumeId, bucketInfo.getObjectID(), dirInfo.getParentObjectID(), dirInfo.getFileName()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 7e445051f29d..b735c6a090c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -41,7 +41,8 @@ import org.apache.hadoop.ozone.util.CheckedExceptionOperation; /** - * This class is responsible for opening last N snapshot given snapshot or AOS metadata manager by acquiring a lock. 
+ * This class is responsible for opening last N snapshot given a snapshot metadata manager or AOS metadata manager by + * acquiring a lock. */ public abstract class ReclaimableFilter implements CheckedExceptionOperation, Boolean, IOException>, Closeable { @@ -63,8 +64,6 @@ public abstract class ReclaimableFilter implements CheckedExceptionOperation< * Filter to return deleted keys/directories which are reclaimable based on their presence in previous snapshot in * the snapshot chain. * - * @param omSnapshotManager - * @param snapshotChainManager * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot * in the snapshot chain corresponding to bucket key needs to be processed. * @param metadataManager : MetadataManager corresponding to snapshot or AOS. @@ -198,24 +197,27 @@ public void close() throws IOException { previousSnapshotInfos.clear(); } - public ReferenceCounted getPreviousOmSnapshot(int index) { + protected ReferenceCounted getPreviousOmSnapshot(int index) { return previousOmSnapshots.get(index); } - public OMMetadataManager getMetadataManager() { + protected OMMetadataManager getMetadataManager() { return metadataManager; } - public Long getVolumeId() { + protected Long getVolumeId() { return volumeId; } - public OmBucketInfo getBucketInfo() { + protected OmBucketInfo getBucketInfo() { return bucketInfo; } - public SnapshotInfo getPreviousSnapshotInfo(int index) { + protected SnapshotInfo getPreviousSnapshotInfo(int index) { return previousSnapshotInfos.get(index); } + protected OzoneManager getOzoneManager() { + return ozoneManager; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java index 6618b864c50c..79280922ca75 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java @@ -43,13 +43,10 @@ * the snapshot chain. */ public class ReclaimableKeyFilter extends ReclaimableFilter { - private final OzoneManager ozoneManager; private final Map exclusiveSizeMap; private final Map exclusiveReplicatedSizeMap; /** - * @param omSnapshotManager - * @param snapshotChainManager * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot * in the snapshot chain corresponding to bucket key needs to be processed. * @param metadataManager : MetadataManager corresponding to snapshot or AOS. @@ -60,7 +57,6 @@ public ReclaimableKeyFilter(OzoneManager ozoneManager, SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, IOzoneManagerLock lock) { super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 2); - this.ozoneManager = ozoneManager; this.exclusiveSizeMap = new HashMap<>(); this.exclusiveReplicatedSizeMap = new HashMap<>(); } @@ -156,7 +152,7 @@ private Optional getPreviousSnapshotKey(OmKeyInfo keyInfo, OmBucketIn if (keyInfo == null || previousKeyTable == null) { return Optional.empty(); } - String dbRenameKey = ozoneManager.getMetadataManager().getRenameKey( + String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getObjectID()); @@ -167,13 +163,13 @@ private Optional getPreviousSnapshotKey(OmKeyInfo keyInfo, OmBucketIn if (renamedKey == null) { String dbKeyPrevSnap; if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { - dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzonePathKey( + dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzonePathKey( volumeId, bucketInfo.getObjectID(), keyInfo.getParentObjectID(), 
keyInfo.getFileName()); } else { - dbKeyPrevSnap = ozoneManager.getMetadataManager().getOzoneKey( + dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzoneKey( keyInfo.getVolumeName(), keyInfo.getBucketName(), keyInfo.getKeyName()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java index df4dbd3ea4c6..3dfc2e73b00b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java @@ -37,10 +37,6 @@ public class ReclaimableRenameEntryFilter extends ReclaimableFilter { /** - * - * - * @param omSnapshotManager - * @param snapshotChainManager * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot * in the snapshot chain corresponding to bucket key needs to be processed. * @param metadataManager : MetadataManager corresponding to snapshot or AOS. From 8b7c73813b6d95a6b619551fea9b726e2130dd1b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 6 Mar 2025 09:21:23 -0800 Subject: [PATCH 10/14] HDDS-11603. 
Remove over nesting on snapshot initialize in filter Change-Id: I81496bcab937444954a0c50d2790812c5412c14d --- .../om/snapshot/filter/ReclaimableFilter.java | 79 ++++++++++--------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index b735c6a090c6..aa7cf8871bbc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -125,53 +125,54 @@ private boolean validateExistingLastNSnapshotsInChain(String volume, String buck // Initialize the last N snapshots in the chain by acquiring locks. Throw IOException if it fails. private void initializePreviousSnapshotsFromChain(String volume, String bucket) throws IOException { + if (validateExistingLastNSnapshotsInChain(volume, bucket)) { + return; + } // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. - if (!validateExistingLastNSnapshotsInChain(volume, bucket)) { - close(); - try { - // Acquire lock only on last N-1 snapshot & current snapshot(AOS if it is null). - List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); - List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() - .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) - .collect(Collectors.toList()); - List lockIds = new ArrayList<>(expectedSnapshotIds.subList(1, expectedSnapshotIds.size())); - lockIds.add(currentSnapshotInfo == null ? 
null : currentSnapshotInfo.getSnapshotId()); - - if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { - for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { - if (snapshotInfo != null) { - // For AOS fail operation if any of the previous snapshots are not active. currentSnapshotInfo for - // AOS will be null. - previousOmSnapshots.add(currentSnapshotInfo == null - ? omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), - snapshotInfo.getName()) - : omSnapshotManager.getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), - snapshotInfo.getName())); - previousSnapshotInfos.add(snapshotInfo); - } else { - previousOmSnapshots.add(null); - previousSnapshotInfos.add(null); - } - - // TODO: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames - // support. - volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); - String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey(volume, bucket); - bucketInfo = ozoneManager.getMetadataManager().getBucketTable().get(dbBucketKey); + close(); + try { + // Acquire lock only on last N-1 snapshot & current snapshot(AOS if it is null). + List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); + List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() + .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) + .collect(Collectors.toList()); + List lockIds = new ArrayList<>(expectedSnapshotIds.subList(1, expectedSnapshotIds.size())); + lockIds.add(currentSnapshotInfo == null ? null : currentSnapshotInfo.getSnapshotId()); + + if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { + for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { + if (snapshotInfo != null) { + // For AOS fail operation if any of the previous snapshots are not active. currentSnapshotInfo for + // AOS will be null. 
+ previousOmSnapshots.add(currentSnapshotInfo == null + ? omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()) + : omSnapshotManager.getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName())); + previousSnapshotInfos.add(snapshotInfo); + } else { + previousOmSnapshots.add(null); + previousSnapshotInfos.add(null); } - } else { - throw new IOException("Lock acquisition failed for last N snapshots : " + - expectedLastNSnapshotsInChain + " " + currentSnapshotInfo); + + // TODO: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames + // support. + volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); + String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey(volume, bucket); + bucketInfo = ozoneManager.getMetadataManager().getBucketTable().get(dbBucketKey); } - } catch (IOException e) { - this.close(); - throw e; + } else { + throw new IOException("Lock acquisition failed for last N snapshots : " + + expectedLastNSnapshotsInChain + " " + currentSnapshotInfo); } + } catch (IOException e) { + this.close(); + throw e; } } @Override - public Boolean apply(Table.KeyValue keyValue) throws IOException { + public synchronized Boolean apply(Table.KeyValue keyValue) throws IOException { String volume = getVolumeName(keyValue); String bucket = getBucketName(keyValue); initializePreviousSnapshotsFromChain(volume, bucket); From 8bc3cc64e237dafb242007d6b90704010f70ac35 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 6 Mar 2025 13:53:27 -0800 Subject: [PATCH 11/14] HDDS-11603. 
Fix size calculation issue Change-Id: Ifc9ccb406af0aaf8f1282dfb2d612383dca102fb --- .../om/snapshot/filter/ReclaimableFilter.java | 5 ++-- .../snapshot/filter/ReclaimableKeyFilter.java | 29 +++++++++++-------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index aa7cf8871bbc..3a6f87be0629 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -131,12 +131,11 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. close(); try { - // Acquire lock only on last N-1 snapshot & current snapshot(AOS if it is null). + // Acquire lock on last N snapshot & current snapshot(AOS if it is null). List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); - List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() + List lockIds = expectedLastNSnapshotsInChain.stream() .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) .collect(Collectors.toList()); - List lockIds = new ArrayList<>(expectedSnapshotIds.subList(1, expectedSnapshotIds.size())); lockIds.add(currentSnapshotInfo == null ? 
null : currentSnapshotInfo.getSnapshotId()); if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java index 79280922ca75..12fefa76c7f9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.ratis.util.MemoizedCheckedSupplier; +import org.apache.ratis.util.function.CheckedSupplier; /** * Filter to return deleted keys which are reclaimable based on their presence in previous snapshot in @@ -87,7 +88,7 @@ protected Boolean isReclaimable(Table.KeyValue deletedKeyInfo } // Getting keyInfo from prev snapshot's keyTable/fileTable - MemoizedCheckedSupplier, IOException> previousKeyInfo = + CheckedSupplier, IOException> previousKeyInfo = MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(deletedKeyInfo.getValue(), getBucketInfo(), getVolumeId(), renamedTable, previousKeyTable.get())); // If file not present in previous snapshot then it won't be present in previous to previous snapshot either. 
@@ -101,11 +102,11 @@ protected Boolean isReclaimable(Table.KeyValue deletedKeyInfo .getKeyTable(getBucketInfo().getBucketLayout())); } // Getting keyInfo from prev to prev snapshot's keyTable/fileTable based on keyInfo of prev keyTable - MemoizedCheckedSupplier, IOException> previousPrevKeyInfo = + CheckedSupplier, IOException> previousPrevKeyInfo = MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(previousKeyInfo.get().orElse(null), getBucketInfo(), getVolumeId(), prevRenamedTable.get(), previousPrevKeyTable.get())); - SnapshotInfo prevToPrevSnapshotInfo = getPreviousSnapshotInfo(0); - calculateExclusiveSize(prevToPrevSnapshotInfo, previousPrevKeyInfo.get().orElse(null), + SnapshotInfo previousSnapshotInfo = getPreviousSnapshotInfo(1); + calculateExclusiveSize(previousSnapshotInfo, previousKeyInfo, previousPrevKeyInfo, exclusiveSizeMap, exclusiveReplicatedSizeMap); return false; } @@ -134,15 +135,19 @@ public Map getExclusiveReplicatedSizeMap() { * previousToPrevSnapshot - Snapshot which is used to check * if key is exclusive to previousSnapshot. */ - private void calculateExclusiveSize(SnapshotInfo prevToPrevSnapKey, OmKeyInfo keyInfoPrevToPrevSnapshot, - Map exclusiveSizes, Map exclusiveReplicatedSizes) { - if (keyInfoPrevToPrevSnapshot == null) { - return; + private void calculateExclusiveSize(SnapshotInfo previousSnapshotInfo, + CheckedSupplier, IOException> keyInfoPrevSnapshot, + CheckedSupplier, IOException> keyInfoPrevToPrevSnapshot, + Map exclusiveSizes, Map exclusiveReplicatedSizes) + throws IOException { + if (keyInfoPrevSnapshot.get().isPresent() && !keyInfoPrevToPrevSnapshot.get().isPresent()) { + OmKeyInfo keyInfo = keyInfoPrevSnapshot.get().get(); + exclusiveSizes.compute(previousSnapshotInfo.getSnapshotId(), + (k, v) -> (v == null ? 0 : v) + keyInfo.getDataSize()); + exclusiveReplicatedSizes.compute(previousSnapshotInfo.getSnapshotId(), + (k, v) -> (v == null ? 
0 : v) + keyInfo.getReplicatedSize()); } - exclusiveSizes.compute(prevToPrevSnapKey.getSnapshotId(), - (k, v) -> (v == null ? 0 : v) + keyInfoPrevToPrevSnapshot.getDataSize()); - exclusiveReplicatedSizes.compute(prevToPrevSnapKey.getSnapshotId(), - (k, v) -> (v == null ? 0 : v) + keyInfoPrevToPrevSnapshot.getReplicatedSize()); + } private Optional getPreviousSnapshotKey(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, From 4c9bdfb0a0c164438af82f2bc82f3b896fd8d548 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 9 Mar 2025 00:58:47 -0800 Subject: [PATCH 12/14] HDDS-11603. Make more modular Change-Id: I47390045a950428c29cc13469bc9639875b5f2e0 --- ...ionOperation.java => CheckedFunction.java} | 2 +- .../apache/hadoop/ozone/om/KeyManager.java | 22 +++++- .../hadoop/ozone/om/KeyManagerImpl.java | 40 ++++++++++ .../snapshot/filter/ReclaimableDirFilter.java | 41 +++------- .../om/snapshot/filter/ReclaimableFilter.java | 34 ++++----- .../snapshot/filter/ReclaimableKeyFilter.java | 74 ++++++------------- .../filter/ReclaimableRenameEntryFilter.java | 48 +++++------- .../{filter => }/TestMultiSnapshotLocks.java | 3 +- .../filter/TestReclaimableFilter.java | 61 ++++++++++++++- 9 files changed, 188 insertions(+), 137 deletions(-) rename hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/{CheckedExceptionOperation.java => CheckedFunction.java} (94%) rename hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/{filter => }/TestMultiSnapshotLocks.java (97%) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java similarity index 94% rename from hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java index 4e41e54f0993..0d565cbf3f7a 100644 --- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedExceptionOperation.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java @@ -26,6 +26,6 @@ * – the type of the result of the function * - the type of exception thrown. */ -public interface CheckedExceptionOperation { +public interface CheckedFunction { R apply(T t) throws E; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index d25535b151d4..d3f346b17eef 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; @@ -40,6 +42,7 @@ import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; +import org.apache.hadoop.ozone.util.CheckedFunction; /** * Handles key level commands. @@ -83,7 +86,6 @@ OmKeyInfo lookupKey(OmKeyArgs args, ResolvedBucket bucketLayout, OmKeyInfo getKeyInfo(OmKeyArgs args, ResolvedBucket buctket, String clientAddress) throws IOException; - /** * Returns a list of keys represented by {@link OmKeyInfo} * in the given bucket. 
@@ -134,6 +136,24 @@ List> getRenamesKeyEntries( String volume, String bucket, String startKey, int size) throws IOException; + /** + * Returns the previous snapshot's ozone keyInfo corresponding for the object. + */ + CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo directoryInfo) throws IOException; + + /** + * Returns the previous snapshot's ozone keyInfo corresponding for the object. + */ + CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo directoryInfo) throws IOException; + + /** + * Returns the previous snapshot's ozone keyInfo corresponding for the object. + */ + CheckedFunction getPreviousSnapshotOzoneKeyInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException; + /** * Returns a list deleted entries from the deletedTable. * diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 4357542ff7b8..33cbf84e85d8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -161,6 +161,7 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; +import org.apache.hadoop.ozone.util.CheckedFunction; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; @@ -731,6 +732,45 @@ public List> getRenamesKeyEntries( } } + @Override + public CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo keyInfo) throws IOException { + String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), + 
keyInfo.getParentObjectID(), keyInfo.getName()); + return getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (km) -> km.getMetadataManager().getDirectoryTable()); + } + + @Override + public CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException { + String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), keyInfo.getFileName()); + return getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (km) -> km.getMetadataManager().getDirectoryTable()); + } + + @Override + public CheckedFunction getPreviousSnapshotOzoneKeyInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException { + String currentKeyPath = bucketInfo.getBucketLayout().isFileSystemOptimized() + ? metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), keyInfo.getParentObjectID(), + keyInfo.getFileName()) : metadataManager.getOzoneKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), + keyInfo.getKeyName()); + return getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (km) -> km.getMetadataManager().getKeyTable(bucketInfo.getBucketLayout())); + } + + + private CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, long objectId, String currentKeyPath, + Function> table) throws IOException { + String renameKey = metadataManager.getRenameKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), objectId); + String renamedKey = metadataManager.getSnapshotRenamedTable().getIfExist(renameKey); + + return (km) -> table.apply(km).get(renamedKey == null ? 
renameKey : currentKeyPath); + } + @Override public List>> getDeletedKeyEntries( String volume, String bucket, String startKey, int size) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java index 863a7e33abb5..1123c6a2df73 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -19,7 +19,7 @@ import java.io.IOException; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -40,17 +40,12 @@ public class ReclaimableDirFilter extends ReclaimableFilter { /** * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in * the snapshot chain. - * - * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot - * in the snapshot chain corresponding to bucket key needs to be processed. - * @param metadataManager : MetadataManager corresponding to snapshot or AOS. - * @param lock : Lock for Active OM. 
*/ public ReclaimableDirFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, - SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, IOzoneManagerLock lock) { - super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 1); + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 1); } @Override @@ -66,33 +61,17 @@ protected String getBucketName(Table.KeyValue keyValue) throw @Override protected Boolean isReclaimable(Table.KeyValue deletedDirInfo) throws IOException { ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0); - Table prevDirTable = previousSnapshot == null ? null : - previousSnapshot.get().getMetadataManager().getDirectoryTable(); - return isDirReclaimable(deletedDirInfo.getValue(), getVolumeId(), getBucketInfo(), prevDirTable, - getMetadataManager().getSnapshotRenamedTable()); + KeyManager prevKeyManager = previousSnapshot == null ? 
null : previousSnapshot.get().getKeyManager(); + return isDirReclaimable(getVolumeId(), getBucketInfo(), deletedDirInfo.getValue(), getKeyManager(), prevKeyManager); } - private boolean isDirReclaimable(OmKeyInfo dirInfo, - long volumeId, OmBucketInfo bucketInfo, - Table previousDirTable, - Table renamedTable) throws IOException { - if (previousDirTable == null) { + private boolean isDirReclaimable(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo dirInfo, + KeyManager keyManager, KeyManager previousKeyManager) throws IOException { + if (previousKeyManager == null) { return true; } - String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( - dirInfo.getVolumeName(), dirInfo.getBucketName(), dirInfo.getObjectID()); - - // snapshotRenamedTable: /volumeName/bucketName/objectID -> /volumeId/bucketId/parentId/dirName - String dbKeyBeforeRename = renamedTable.getIfExist(dbRenameKey); - String prevDbKey; - if (dbKeyBeforeRename != null) { - prevDbKey = dbKeyBeforeRename; - } else { - prevDbKey = getOzoneManager().getMetadataManager().getOzonePathKey( - volumeId, bucketInfo.getObjectID(), dirInfo.getParentObjectID(), dirInfo.getFileName()); - } - - OmDirectoryInfo prevDirectoryInfo = previousDirTable.get(prevDbKey); + OmDirectoryInfo prevDirectoryInfo = + keyManager.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, dirInfo).apply(previousKeyManager); return prevDirectoryInfo == null || prevDirectoryInfo.getObjectID() != dirInfo.getObjectID(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 3a6f87be0629..cc0e68eb24c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -26,7 +26,7 @@ 
import java.util.UUID; import java.util.stream.Collectors; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -38,13 +38,13 @@ import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.hadoop.ozone.util.CheckedExceptionOperation; +import org.apache.hadoop.ozone.util.CheckedFunction; /** * This class is responsible for opening last N snapshot given a snapshot metadata manager or AOS metadata manager by * acquiring a lock. */ -public abstract class ReclaimableFilter implements CheckedExceptionOperation, +public abstract class ReclaimableFilter implements CheckedFunction, Boolean, IOException>, Closeable { private final OzoneManager ozoneManager; @@ -57,7 +57,7 @@ public abstract class ReclaimableFilter implements CheckedExceptionOperation< private final MultiSnapshotLocks snapshotIdLocks; private Long volumeId; private OmBucketInfo bucketInfo; - private final OMMetadataManager metadataManager; + private final KeyManager keyManager; private final int numberOfPreviousSnapshotsFromChain; /** @@ -66,12 +66,12 @@ public abstract class ReclaimableFilter implements CheckedExceptionOperation< * * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot * in the snapshot chain corresponding to bucket key needs to be processed. - * @param metadataManager : MetadataManager corresponding to snapshot or AOS. + * @param keyManager : KeyManager corresponding to snapshot or AOS. * @param lock : Lock for Active OM. 
*/ public ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, - SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { this.ozoneManager = ozoneManager; @@ -79,7 +79,7 @@ public ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshot this.currentSnapshotInfo = currentSnapshotInfo; this.snapshotChainManager = snapshotChainManager; this.snapshotIdLocks = new MultiSnapshotLocks(lock, OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK, false); - this.metadataManager = metadataManager; + this.keyManager = keyManager; this.numberOfPreviousSnapshotsFromChain = numberOfPreviousSnapshotsFromChain; this.previousOmSnapshots = new ArrayList<>(numberOfPreviousSnapshotsFromChain); this.previousSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); @@ -136,18 +136,15 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) List lockIds = expectedLastNSnapshotsInChain.stream() .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) .collect(Collectors.toList()); + //currentSnapshotInfo for AOS will be null. lockIds.add(currentSnapshotInfo == null ? null : currentSnapshotInfo.getSnapshotId()); if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { if (snapshotInfo != null) { - // For AOS fail operation if any of the previous snapshots are not active. currentSnapshotInfo for - // AOS will be null. - previousOmSnapshots.add(currentSnapshotInfo == null - ? 
omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), - snapshotInfo.getName()) - : omSnapshotManager.getSnapshot(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), - snapshotInfo.getName())); + // Fail operation if any of the previous snapshots are not active. + previousOmSnapshots.add(omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), snapshotInfo.getName())); previousSnapshotInfos.add(snapshotInfo); } else { previousOmSnapshots.add(null); @@ -156,9 +153,8 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) // TODO: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames // support. + bucketInfo = ozoneManager.getBucketInfo(volume, bucket); volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); - String dbBucketKey = ozoneManager.getMetadataManager().getBucketKey(volume, bucket); - bucketInfo = ozoneManager.getMetadataManager().getBucketTable().get(dbBucketKey); } } else { throw new IOException("Lock acquisition failed for last N snapshots : " + @@ -185,7 +181,7 @@ public synchronized Boolean apply(Table.KeyValue keyValue) throws IOE protected abstract String getBucketName(Table.KeyValue keyValue) throws IOException; - protected abstract Boolean isReclaimable(Table.KeyValue omKeyInfo) throws IOException; + protected abstract Boolean isReclaimable(Table.KeyValue keyValue) throws IOException; @Override public void close() throws IOException { @@ -201,8 +197,8 @@ protected ReferenceCounted getPreviousOmSnapshot(int index) { return previousOmSnapshots.get(index); } - protected OMMetadataManager getMetadataManager() { - return metadataManager; + protected KeyManager getKeyManager() { + return keyManager; } protected Long getVolumeId() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java index 12fefa76c7f9..f99d36f24376 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableKeyFilter.java @@ -24,9 +24,8 @@ import java.util.Map; import java.util.Optional; import java.util.UUID; -import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -50,14 +49,14 @@ public class ReclaimableKeyFilter extends ReclaimableFilter { /** * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot * in the snapshot chain corresponding to bucket key needs to be processed. - * @param metadataManager : MetadataManager corresponding to snapshot or AOS. + * @param keyManager : keyManager corresponding to snapshot or AOS. * @param lock : Lock for Active OM. 
*/ public ReclaimableKeyFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, - SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, IOzoneManagerLock lock) { - super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 2); + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 2); this.exclusiveSizeMap = new HashMap<>(); this.exclusiveReplicatedSizeMap = new HashMap<>(); } @@ -75,36 +74,30 @@ protected String getBucketName(Table.KeyValue keyValue) throw @Override protected Boolean isReclaimable(Table.KeyValue deletedKeyInfo) throws IOException { ReferenceCounted previousSnapshot = getPreviousOmSnapshot(1); - ReferenceCounted previousToPreviousSnapshot = getPreviousOmSnapshot(0); - AtomicReference> previousKeyTable = new AtomicReference<>(); - Table renamedTable = getMetadataManager().getSnapshotRenamedTable(); - AtomicReference> prevRenamedTable = new AtomicReference<>(); + KeyManager previousKeyManager = Optional.ofNullable(previousSnapshot) + .map(i -> i.get().getKeyManager()).orElse(null); - if (previousSnapshot != null) { - previousKeyTable.set(previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout())); - prevRenamedTable.set(previousSnapshot.get().getMetadataManager().getSnapshotRenamedTable()); - } // Getting keyInfo from prev snapshot's keyTable/fileTable CheckedSupplier, IOException> previousKeyInfo = - MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(deletedKeyInfo.getValue(), getBucketInfo(), - getVolumeId(), renamedTable, previousKeyTable.get())); + MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(getVolumeId(), getBucketInfo(), + deletedKeyInfo.getValue(), getKeyManager(), previousKeyManager)); // If file not present in previous snapshot then it won't be present in 
previous to previous snapshot either. if (!previousKeyInfo.get().isPresent()) { return true; } - AtomicReference> previousPrevKeyTable = new AtomicReference<>(); - if (previousToPreviousSnapshot != null) { - previousPrevKeyTable.set(previousToPreviousSnapshot.get().getMetadataManager() - .getKeyTable(getBucketInfo().getBucketLayout())); - } + ReferenceCounted previousToPreviousSnapshot = getPreviousOmSnapshot(0); + KeyManager previousToPreviousKeyManager = Optional.ofNullable(previousToPreviousSnapshot) + .map(i -> i.get().getKeyManager()).orElse(null); + // Getting keyInfo from prev to prev snapshot's keyTable/fileTable based on keyInfo of prev keyTable CheckedSupplier, IOException> previousPrevKeyInfo = - MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey(previousKeyInfo.get().orElse(null), - getBucketInfo(), getVolumeId(), prevRenamedTable.get(), previousPrevKeyTable.get())); + MemoizedCheckedSupplier.valueOf(() -> getPreviousSnapshotKey( + getVolumeId(), getBucketInfo(), previousKeyInfo.get().orElse(null), previousKeyManager, + previousToPreviousKeyManager)); SnapshotInfo previousSnapshotInfo = getPreviousSnapshotInfo(1); calculateExclusiveSize(previousSnapshotInfo, previousKeyInfo, previousPrevKeyInfo, exclusiveSizeMap, exclusiveReplicatedSizeMap); @@ -150,40 +143,17 @@ private void calculateExclusiveSize(SnapshotInfo previousSnapshotInfo, } - private Optional getPreviousSnapshotKey(OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, - Table previousKeyTable) throws IOException { + private Optional getPreviousSnapshotKey(long volumeId, OmBucketInfo bucketInfo, + OmKeyInfo keyInfo, KeyManager keyManager, + KeyManager previousKeyManager) throws IOException { - if (keyInfo == null || previousKeyTable == null) { + if (keyInfo == null || previousKeyManager == null) { return Optional.empty(); } - String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - 
keyInfo.getObjectID()); - - String renamedKey = snapRenamedTable.getIfExist(dbRenameKey); - OmKeyInfo prevKeyInfo; - - if (renamedKey == null) { - String dbKeyPrevSnap; - if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { - dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzonePathKey( - volumeId, - bucketInfo.getObjectID(), - keyInfo.getParentObjectID(), - keyInfo.getFileName()); - } else { - dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzoneKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getKeyName()); - } - prevKeyInfo = previousKeyTable.get(dbKeyPrevSnap); - } else { - prevKeyInfo = previousKeyTable.get(renamedKey); - } + OmKeyInfo prevKeyInfo = keyManager.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, keyInfo) + .apply(previousKeyManager); + // Check if objectIds are matching then the keys are the same. if (prevKeyInfo == null || prevKeyInfo.getObjectID() != keyInfo.getObjectID()) { return Optional.empty(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java index 3dfc2e73b00b..cfb13b227a27 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java @@ -19,7 +19,7 @@ import java.io.IOException; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; 
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; @@ -36,17 +37,12 @@ */ public class ReclaimableRenameEntryFilter extends ReclaimableFilter { - /** - * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot - * in the snapshot chain corresponding to bucket key needs to be processed. - * @param metadataManager : MetadataManager corresponding to snapshot or AOS. - * @param lock : Lock for Active OM. - */ + public ReclaimableRenameEntryFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, - SnapshotInfo currentSnapshotInfo, OMMetadataManager metadataManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, IOzoneManagerLock lock) { - super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, metadataManager, lock, 1); + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 1); } @Override @@ -63,35 +59,27 @@ protected Boolean isReclaimable(Table.KeyValue renameEntry) thro @Override protected String getVolumeName(Table.KeyValue keyValue) throws IOException { - return getMetadataManager().splitRenameKey(keyValue.getKey())[0]; + return getKeyManager().getMetadataManager().splitRenameKey(keyValue.getKey())[0]; } @Override protected String getBucketName(Table.KeyValue keyValue) throws IOException { - return getMetadataManager().splitRenameKey(keyValue.getKey())[1]; + return getKeyManager().getMetadataManager().splitRenameKey(keyValue.getKey())[1]; } - private boolean isRenameEntryReclaimable(Table.KeyValue renameEntry, - Table previousDirTable, - Table prevKeyInfoTable) throws IOException { - - if (previousDirTable == null && prevKeyInfoTable == null) { - return true; - } - String prevDbKey = 
renameEntry.getValue(); - - - if (previousDirTable != null) { - OmDirectoryInfo prevDirectoryInfo = previousDirTable.getIfExist(prevDbKey); - if (prevDirectoryInfo != null) { - return false; + @SafeVarargs + private final boolean isRenameEntryReclaimable(Table.KeyValue renameEntry, + Table... previousTables) + throws IOException { + for (Table previousTable : previousTables) { + if (previousTable != null) { + String prevDbKey = renameEntry.getValue(); + WithObjectID withObjectID = previousTable.getIfExist(prevDbKey); + if (withObjectID != null) { + return false; + } } } - - if (prevKeyInfoTable != null) { - OmKeyInfo omKeyInfo = prevKeyInfoTable.getIfExist(prevDbKey); - return omKeyInfo == null; - } return true; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java similarity index 97% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestMultiSnapshotLocks.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java index bb147332a7f4..741f1d30c36e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestMultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om.snapshot.filter; +package org.apache.hadoop.ozone.om.snapshot; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -38,7 +38,6 @@ import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; -import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index 300cb5a991fd..e6352992b1c2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -17,12 +17,71 @@ package org.apache.hadoop.ozone.om.snapshot.filter; +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.mockito.Mockito; /** * Test class for ReclaimableFilter. 
*/ -public class TestReclaimableFilter { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestReclaimableFilter { + + private ReclaimableFilter reclaimableFilter; + private OzoneManager ozoneManager; + private OmSnapshotManager omSnapshotManager; + private SnapshotChainManager chainManager; + private KeyManager keyManager; + private IOzoneManagerLock ozoneManagerLock; + + + + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, + IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableFilter(om, snapshotManager, snapshotChainManager, currentSnapshotInfo, + km, lock, numberOfPreviousSnapshotsFromChain) { + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return keyValue.getKey().split("/")[0]; + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return keyValue.getKey().split("/")[1]; + } + + @Override + protected Boolean isReclaimable(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue(); + } + }; + } + + public void setup(SnapshotInfo currentSnapshotInfo, int numberOfPreviousSnapshotsFromChain) { + this.ozoneManager = Mockito.mock(OzoneManager.class); + this.omSnapshotManager = Mockito.mock(OmSnapshotManager.class); + this.chainManager = Mockito.mock(SnapshotChainManager.class); + this.keyManager = Mockito.mock(KeyManager.class); + this.ozoneManagerLock = Mockito.mock(IOzoneManagerLock.class); + this.reclaimableFilter = initializeFilter(ozoneManager, omSnapshotManager, chainManager, + currentSnapshotInfo, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain); + } + + @AfterAll + public void teardown() { + + } @Test public void testReclaimableFilter() { From b7e20caf0a8696822cd667967b2306561e02b735 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran 
Date: Sun, 9 Mar 2025 23:06:30 -0700 Subject: [PATCH 13/14] HDDS-11603. Add reclaimable Filter test case Change-Id: Id38661072466b08ab24140f13f3310f480711bd0 --- .../ozone/om/snapshot/MultiSnapshotLocks.java | 14 +- .../om/snapshot/filter/ReclaimableFilter.java | 28 +- .../filter/TestReclaimableFilter.java | 438 ++++++++++++++++-- 3 files changed, 444 insertions(+), 36 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index 86ad8807493d..9b8b0db2a698 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -38,12 +38,14 @@ public class MultiSnapshotLocks { private final IOzoneManagerLock lock; private final OzoneManagerLock.Resource resource; private final boolean writeLock; + private OMLockDetails lockDetails; public MultiSnapshotLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource resource, boolean writeLock) { this.writeLock = writeLock; this.resource = resource; this.lock = lock; this.objectLocks = new ArrayList<>(); + this.lockDetails = OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } public OMLockDetails acquireLock(Collection ids) throws OMException { @@ -53,20 +55,22 @@ public OMLockDetails acquireLock(Collection ids) throws OMException { OMException.ResultCodes.INTERNAL_ERROR); } List keys = - ids.stream().filter(Objects::nonNull).map(id -> new String[] {id.toString()}).collect(Collectors.toList()); + ids.stream().filter(Objects::nonNull).map(id -> new String[] {id.toString()}) + .collect(Collectors.toList()); OMLockDetails omLockDetails = this.writeLock ? 
lock.acquireWriteLocks(resource, keys) : lock.acquireReadLocks(resource, keys); if (omLockDetails.isLockAcquired()) { objectLocks.addAll(keys); } + this.lockDetails = omLockDetails; return omLockDetails; } public void releaseLock() { if (this.writeLock) { - lock.releaseWriteLocks(resource, this.objectLocks); + lockDetails = lock.releaseWriteLocks(resource, this.objectLocks); } else { - lock.releaseReadLocks(resource, this.objectLocks); + lockDetails = lock.releaseReadLocks(resource, this.objectLocks); } this.objectLocks.clear(); } @@ -75,4 +79,8 @@ public void releaseLock() { public List getObjectLocks() { return objectLocks; } + + public boolean isLockAcquired() { + return lockDetails.isLockAcquired(); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index cc0e68eb24c8..dcbc2f44e6e8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot.filter; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import java.io.Closeable; import java.io.IOException; @@ -25,6 +26,7 @@ import java.util.List; import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -39,6 +41,8 @@ import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.util.CheckedFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class is responsible 
for opening last N snapshot given a snapshot metadata manager or AOS metadata manager by @@ -47,6 +51,8 @@ public abstract class ReclaimableFilter implements CheckedFunction, Boolean, IOException>, Closeable { + private static final Logger LOG = LoggerFactory.getLogger(ReclaimableFilter.class); + private final OzoneManager ozoneManager; private final SnapshotInfo currentSnapshotInfo; private final OmSnapshotManager omSnapshotManager; @@ -94,17 +100,17 @@ private List getLastNSnapshotInChain(String volume, String bucket) SnapshotInfo expectedPreviousSnapshotInfo = currentSnapshotInfo == null ? SnapshotUtils.getLatestSnapshotInfo(volume, bucket, ozoneManager, snapshotChainManager) : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, currentSnapshotInfo); - List snapshotInfos = Lists.newArrayList(expectedPreviousSnapshotInfo); + List snapshotInfos = Lists.newArrayList(); SnapshotInfo snapshotInfo = expectedPreviousSnapshotInfo; while (snapshotInfos.size() < numberOfPreviousSnapshotsFromChain) { - snapshotInfo = snapshotInfo == null ? null - : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); - snapshotInfos.add(snapshotInfo); // If changes made to the snapshot have not been flushed to disk, throw exception immediately, next run of // garbage collection would process the snapshot. if (!OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), snapshotInfo)) { throw new IOException("Changes made to the snapshot " + snapshotInfo + " have not been flushed to the disk "); } + snapshotInfos.add(snapshotInfo); + snapshotInfo = snapshotInfo == null ? null + : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); } // Reversing list to get the correct order in chain. To ensure locking order is as per the chain ordering. 
@@ -125,7 +131,7 @@ private boolean validateExistingLastNSnapshotsInChain(String volume, String buck // Initialize the last N snapshots in the chain by acquiring locks. Throw IOException if it fails. private void initializePreviousSnapshotsFromChain(String volume, String bucket) throws IOException { - if (validateExistingLastNSnapshotsInChain(volume, bucket)) { + if (validateExistingLastNSnapshotsInChain(volume, bucket) && snapshotIdLocks.isLockAcquired()) { return; } // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. @@ -187,7 +193,7 @@ public synchronized Boolean apply(Table.KeyValue keyValue) throws IOE public void close() throws IOException { this.snapshotIdLocks.releaseLock(); for (ReferenceCounted previousOmSnapshot : previousOmSnapshots) { - previousOmSnapshot.close(); + IOUtils.close(LOG, previousOmSnapshot); } previousOmSnapshots.clear(); previousSnapshotInfos.clear(); @@ -216,4 +222,14 @@ protected SnapshotInfo getPreviousSnapshotInfo(int index) { protected OzoneManager getOzoneManager() { return ozoneManager; } + + @VisibleForTesting + List getPreviousSnapshotInfos() { + return previousSnapshotInfos; + } + + @VisibleForTesting + List> getPreviousOmSnapshots() { + return previousOmSnapshots; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index e6352992b1c2..687f484a74b2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -17,39 +17,94 @@ package org.apache.hadoop.ozone.om.snapshot.filter; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static 
org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; + import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Test; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import 
org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; import org.mockito.Mockito; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.DBOptions; +import org.rocksdb.ReadOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; /** * Test class for ReclaimableFilter. */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -public abstract class TestReclaimableFilter { +public class TestReclaimableFilter { - private ReclaimableFilter reclaimableFilter; + private ReclaimableFilter reclaimableFilter; private OzoneManager ozoneManager; private OmSnapshotManager omSnapshotManager; - private SnapshotChainManager chainManager; - private KeyManager keyManager; - private IOzoneManagerLock ozoneManagerLock; - - + private AtomicReference> lockIds = new AtomicReference<>(Collections.emptyList()); + private List volumes; + private List buckets; + private MockedStatic mockedSnapshotUtils; + private Map> snapshotInfos; + @TempDir + private Path testDir; + private SnapshotChainManager snapshotChainManager; - protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, - SnapshotChainManager snapshotChainManager, - SnapshotInfo currentSnapshotInfo, KeyManager km, - IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { - return new ReclaimableFilter(om, snapshotManager, snapshotChainManager, currentSnapshotInfo, + protected 
ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, + IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock, numberOfPreviousSnapshotsFromChain) { @Override protected String getVolumeName(Table.KeyValue keyValue) throws IOException { @@ -63,29 +118,358 @@ protected String getBucketName(Table.KeyValue keyValue) throws @Override protected Boolean isReclaimable(Table.KeyValue keyValue) throws IOException { - return keyValue.getValue(); + return keyValue == null || keyValue.getValue(); } }; } - public void setup(SnapshotInfo currentSnapshotInfo, int numberOfPreviousSnapshotsFromChain) { - this.ozoneManager = Mockito.mock(OzoneManager.class); - this.omSnapshotManager = Mockito.mock(OmSnapshotManager.class); - this.chainManager = Mockito.mock(SnapshotChainManager.class); - this.keyManager = Mockito.mock(KeyManager.class); - this.ozoneManagerLock = Mockito.mock(IOzoneManagerLock.class); - this.reclaimableFilter = initializeFilter(ozoneManager, omSnapshotManager, chainManager, - currentSnapshotInfo, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain); + public SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume) throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info); + } + + public SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, Function snapshotProps) + throws IOException, RocksDBException { + this.ozoneManager = mock(OzoneManager.class); + this.snapshotChainManager = 
mock(SnapshotChainManager.class); + KeyManager keyManager = mock(KeyManager.class); + IOzoneManagerLock ozoneManagerLock = mock(IOzoneManagerLock.class); + when(ozoneManagerLock.acquireReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + lockIds.set( + (List) i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + }); + when(ozoneManagerLock.releaseReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + Assertions.assertEquals(lockIds.get(), + i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + lockIds.set(Collections.emptyList()); + return OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + }); + snapshotInfos = mockSnapshotChain(actualTotalNumberOfSnapshotsInChain, + ozoneManager, snapshotChainManager, numberOfVolumes, numberOfBucketsPerVolume, snapshotProps); + mockOmSnapshotManager(ozoneManager); + SnapshotInfo info = index >= actualTotalNumberOfSnapshotsInChain ? 
null : + snapshotInfos.get(getKey(volumes.get(volumes.size() - 1), buckets.get(buckets.size() - 1))).get(index); + this.reclaimableFilter = Mockito.spy(initializeFilter(ozoneManager, omSnapshotManager, snapshotChainManager, + info, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain)); + return info; + } + + @AfterEach + public void teardown() throws IOException { + this.mockedSnapshotUtils.close(); + this.reclaimableFilter.close(); + } + + private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { + try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); + MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, + (mock, context) -> { + when(mock.get(any(UUID.class))).thenAnswer(i -> { + if (snapshotInfos.values().stream().flatMap(List::stream) + .map(SnapshotInfo::getSnapshotId) + .noneMatch(id -> id.equals(i.getArgument(0, UUID.class)))) { + throw new IOException("Snapshot " + i.getArgument(0, UUID.class) + " not found"); + } + ReferenceCounted referenceCounted = mock(ReferenceCounted.class); + OmSnapshot omSnapshot = mock(OmSnapshot.class); + when(omSnapshot.getSnapshotID()).thenReturn(i.getArgument(0, UUID.class)); + when(referenceCounted.get()).thenReturn(omSnapshot); + return referenceCounted; + }); + })) { + ManagedRocksDB managedRocksDB = mock(ManagedRocksDB.class); + RocksDB rocksDB = mock(RocksDB.class); + rocksdb.when(() -> ManagedRocksDB.open(any(DBOptions.class), anyString(), anyList(), anyList())) + .thenReturn(managedRocksDB); + RocksIterator emptyRocksIterator = mock(RocksIterator.class); + when(emptyRocksIterator.isValid()).thenReturn(false); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class))).thenReturn(emptyRocksIterator); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class))).thenReturn(emptyRocksIterator); + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + DBStore dbStore = mock(RDBStore.class); + 
when(metadataManager.getStore()).thenReturn(dbStore); + when(dbStore.getRocksDBCheckpointDiffer()).thenReturn(Mockito.mock(RocksDBCheckpointDiffer.class)); + when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); + Table mockedTransactionTable = Mockito.mock(Table.class); + when(metadataManager.getTransactionInfoTable()).thenReturn(mockedTransactionTable); + when(mockedTransactionTable.getSkipCache(eq(TRANSACTION_INFO_KEY))) + .thenReturn(TransactionInfo.valueOf(0, 10)); + when(managedRocksDB.get()).thenReturn(rocksDB); + + when(rocksDB.createColumnFamily(any(ColumnFamilyDescriptor.class))) + .thenAnswer(i -> { + ColumnFamilyDescriptor descriptor = i.getArgument(0, ColumnFamilyDescriptor.class); + ColumnFamilyHandle ch = Mockito.mock(ColumnFamilyHandle.class); + when(ch.getName()).thenReturn(descriptor.getName()); + return ch; + }); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); + when(om.getConfiguration()).thenReturn(conf); + when(om.isFilesystemSnapshotEnabled()).thenReturn(true); + this.omSnapshotManager = new OmSnapshotManager(om); + } + } + + private Map> mockSnapshotChain( + int numberOfSnaphotsInChain, OzoneManager om, SnapshotChainManager chainManager, int numberOfVolumes, + int numberOfBuckets, Function snapshotInfoProp) { + volumes = IntStream.range(0, numberOfVolumes).mapToObj(i -> "volume" + i).collect(Collectors.toList()); + buckets = IntStream.range(0, numberOfBuckets).mapToObj(i -> "bucket" + i).collect(Collectors.toList()); + Map> bucketSnapshotMap = new HashMap<>(); + for (String volume : volumes) { + for (String bucket : buckets) { + bucketSnapshotMap.computeIfAbsent(getKey(volume, bucket), (k) -> new ArrayList<>()); + } + } + mockedSnapshotUtils = mockStatic(SnapshotUtils.class, CALLS_REAL_METHODS); + for (int i = 0; i < numberOfSnaphotsInChain; i++) { + for (String volume : volumes) { + for (String bucket : buckets) { + SnapshotInfo 
snapshotInfo = snapshotInfoProp.apply(SnapshotInfo.newInstance(volume, bucket, + "snap" + i, UUID.randomUUID(), 0)); + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), + eq(snapshotInfo.getTableKey()))).thenReturn(snapshotInfo); + mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(om), eq(chainManager), + eq(snapshotInfo))).thenReturn(infos.isEmpty() ? null : infos.get(infos.size() - 1)); + infos.add(snapshotInfo); + } + } + } + + for (String volume : volumes) { + for (String bucket : buckets) { + mockedSnapshotUtils.when(() -> SnapshotUtils.getLatestSnapshotInfo( + eq(volume), eq(bucket), eq(om), eq(chainManager))) + .thenAnswer(i -> { + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + return infos.isEmpty() ? null : infos.get(infos.size() - 1); + }); + } + } + return bucketSnapshotMap; + + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + for (int k = 0; k < 5; k++) { + arguments.add(Arguments.of(i, j, k)); + } + } + } + return arguments; + } + + private List getLastSnapshotInfos(String volume, String bucket, int numberOfSnapshotsInChain, + int index) { + List infos = snapshotInfos.get(getKey(volume, bucket)); + int endIndex = Math.min(index - 1, infos.size() - 1); + return IntStream.range(endIndex - numberOfSnapshotsInChain + 1, endIndex + 1).mapToObj(i -> i >= 0 ? 
+ infos.get(i) : null).collect(Collectors.toList()); + } + + private void testSnapshotInitAndLocking(String volume, String bucket, int numberOfPreviousSnapshotsFromChain, + int index, SnapshotInfo currentSnapshotInfo, Boolean reclaimable, + Boolean expectedReturnValue) throws IOException { + List infos = getLastSnapshotInfos(volume, bucket, numberOfPreviousSnapshotsFromChain, index); + assertEquals(expectedReturnValue, reclaimableFilter.apply(Table.newKeyValue(getKey(volume, bucket), reclaimable))); + Assertions.assertEquals(infos, reclaimableFilter.getPreviousSnapshotInfos()); + Assertions.assertEquals(infos.size(), reclaimableFilter.getPreviousOmSnapshots().size()); + Assertions.assertEquals(infos.stream().map(si -> si == null ? null : si.getSnapshotId()) + .collect(Collectors.toList()), reclaimableFilter.getPreviousOmSnapshots().stream() + .map(i -> i == null ? null : ((ReferenceCounted) i).get().getSnapshotID()) + .collect(Collectors.toList())); + infos.add(currentSnapshotInfo); + Assertions.assertEquals(infos.stream().filter(Objects::nonNull).map(SnapshotInfo::getSnapshotId).collect( + Collectors.toList()), lockIds.get()); } - @AfterAll - public void teardown() { + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableFilterSnapshotChainInitilization(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = + setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 2); + String volume = volumes.get(3); + String bucket = buckets.get(1); + testSnapshotInitAndLocking(volume, bucket, numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, + true); + testSnapshotInitAndLocking(volume, bucket, numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, false, + false); + } + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void 
testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = + setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); + AtomicReference volume = new AtomicReference<>(volumes.get(2)); + AtomicReference bucket = new AtomicReference<>(buckets.get(3)); + if (currentSnapshotInfo == null) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + assertEquals("Volume & Bucket name for snapshot : " + + currentSnapshotInfo + " not matching for key in volume: " + volume + + " bucket: " + bucket, ex.getMessage()); + } + volume.set(volumes.get(3)); + bucket.set(buckets.get(2)); + if (currentSnapshotInfo == null) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + assertEquals("Volume & Bucket name for snapshot : " + + currentSnapshotInfo + " not matching for key in volume: " + volume + + " bucket: " + bucket, ex.getMessage()); + } } - @Test - public void testReclaimableFilter() { + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void 
testReclaimabilityOnSnapshotAddition(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { + + SnapshotInfo currentSnapshotInfo = + setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); + AtomicReference volume = new AtomicReference<>(volumes.get(3)); + AtomicReference bucket = new AtomicReference<>(buckets.get(3)); + when(reclaimableFilter.isReclaimable(any(Table.KeyValue.class))).thenAnswer(i -> { + if (i.getArgument(0) == null) { + return null; + } + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volume.get(), bucket.get(), + "snap" + actualNumberOfSnapshots, UUID.randomUUID(), 0); + SnapshotInfo prevSnapshot = SnapshotUtils.getLatestSnapshotInfo(volume.get(), bucket.get(), ozoneManager, + snapshotChainManager); + mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), eq(snapshotInfo.getTableKey()))) + .thenReturn(snapshotInfo); + mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(ozoneManager), eq(this.snapshotChainManager), + eq(snapshotInfo))).thenReturn(prevSnapshot); + snapshotInfos.get(getKey(volume.get(), bucket.get())).add(snapshotInfo); + return i.callRealMethod(); + }); + + if (currentSnapshotInfo == null) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, numberOfPreviousSnapshotsFromChain == 0); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index + 1, + currentSnapshotInfo, false, false); + } else { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } } + List testInvalidSnapshotArgs() { + List arguments = testReclaimableFilterArguments(); + return 
arguments.stream().flatMap(args -> IntStream.range(0, (int) args.get()[1]) + .mapToObj(i -> Arguments.of(args.get()[0], args.get()[1], args.get()[2], i))) + .collect(Collectors.toList()); + } + + @ParameterizedTest + @MethodSource("testInvalidSnapshotArgs") + public void testInitWithInactiveSnapshots(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index, + int snapIndex) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, + 1, 1, (snapshotInfo) -> { + if (snapshotInfo.getVolumeName().equals(volumes.get(0)) && snapshotInfo.getBucketName().equals(buckets.get(0)) + && snapshotInfo.getName().equals("snap" + snapIndex)) { + snapshotInfo.setSnapshotStatus(SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); + } + return snapshotInfo; + }); + + AtomicReference volume = new AtomicReference<>(volumes.get(0)); + AtomicReference bucket = new AtomicReference<>(buckets.get(0)); + int endIndex = Math.min(index - 1, actualNumberOfSnapshots - 1); + int beginIndex = Math.max(0, endIndex - numberOfPreviousSnapshotsFromChain + 1); + if (snapIndex < beginIndex || snapIndex > endIndex) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + + assertEquals(String.format("Unable to load snapshot. 
Snapshot with table key '/%s/%s/%s' is no longer active", + volume.get(), bucket.get(), "snap" + snapIndex), ex.getMessage()); + } + } + + @ParameterizedTest + @MethodSource("testInvalidSnapshotArgs") + public void testInitWithUnflushedSnapshots(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index, + int snapIndex) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, + 4, 4, (snapshotInfo) -> { + if (snapshotInfo.getVolumeName().equals(volumes.get(3)) && snapshotInfo.getBucketName().equals(buckets.get(3)) + && snapshotInfo.getName().equals("snap" + snapIndex)) { + try { + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(0, 11).toByteString()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return snapshotInfo; + }); + + AtomicReference volume = new AtomicReference<>(volumes.get(3)); + AtomicReference bucket = new AtomicReference<>(buckets.get(3)); + int endIndex = Math.min(index - 1, actualNumberOfSnapshots - 1); + int beginIndex = Math.max(0, endIndex - numberOfPreviousSnapshotsFromChain + 1); + if (snapIndex < beginIndex || snapIndex > endIndex) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + assertEquals(String.format("Changes made to the snapshot %s have not been flushed to the disk ", + snapshotInfos.get(getKey(volume.get(), bucket.get())).get(snapIndex)), ex.getMessage()); + } + } + + public static String getKey(String volume, String bucket) { + return volume + "/" + 
bucket; + } } From 0ee6b0640311fefd843fdd63698515c9b2bece03 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 10 Mar 2025 21:11:50 -0700 Subject: [PATCH 14/14] HDDS-11603. Add KeyManager test Change-Id: Ia1027f55775864c2353a3e67cc56b76cff5f0727 --- .../hadoop/ozone/om/TestKeyManagerImpl.java | 144 ++++++++ .../hadoop/ozone/om/KeyManagerImpl.java | 18 +- .../filter/ReclaimableRenameEntryFilter.java | 4 +- .../filter/TestAbstractReclaimableFilter.java | 320 ++++++++++++++++++ .../filter/TestReclaimableDirFilter.java | 142 ++++++++ .../filter/TestReclaimableFilter.java | 260 +++----------- .../filter/TestReclaimableKeyFilter.java | 267 +++++++++++++++ .../TestReclaimableRenameEntryFilter.java | 208 ++++++++++++ 8 files changed, 1132 insertions(+), 231 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 4aef43f86412..e2486e06cb68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -41,6 +41,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static 
org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; @@ -53,6 +54,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import jakarta.annotation.Nonnull; import java.io.File; @@ -69,6 +71,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; @@ -104,6 +107,7 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -112,6 +116,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -141,6 +146,7 @@ import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; @@ -1586,6 +1592,144 @@ void testGetNotExistedPart() throws IOException { assertEquals(0, locationList.size()); } + private Table getMockedTable(Map map) throws IOException { + 
Table table = mock(Table.class); + when(table.get(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + when(table.getIfExist(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + return table; + } + + private OmKeyInfo getMockedOmKeyInfo(OmBucketInfo bucketInfo, long parentId, String key, long objectId) { + OmKeyInfo omKeyInfo = mock(OmKeyInfo.class); + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + when(omKeyInfo.getFileName()).thenReturn(key); + } else { + when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + when(omKeyInfo.getKeyName()).thenReturn(key); + } + when(omKeyInfo.getObjectID()).thenReturn(objectId); + return omKeyInfo; + } + + private OmDirectoryInfo getMockedOmDirInfo(long parentId, String key, long objectId) { + OmDirectoryInfo omKeyInfo = mock(OmDirectoryInfo.class); + when(omKeyInfo.getName()).thenReturn(key); + when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + when(omKeyInfo.getParentObjectID()).thenReturn(0L); + when(omKeyInfo.getObjectID()).thenReturn(objectId); + return omKeyInfo; + } + + private String getPath(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyInfo) { + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + return volumeId + "/" + bucketInfo.getObjectID() + "/" + omKeyInfo.getParentObjectID() + "/" + + omKeyInfo.getFileName(); + } else { + return bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName() + "/" + omKeyInfo.getKeyName(); + } + } + + private String getPath(long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo omDirInfo) { + return volumeId + "/" + bucketInfo.getObjectID() + "/" + omDirInfo.getParentObjectID() + "/" + + omDirInfo.getName(); + } + + private String getRenameKey(String volume, String bucket, long objectId) { + return volume + "/" + bucket + "/" + objectId; + } + + @ParameterizedTest + @EnumSource(value = BucketLayout.class) + public void testPreviousSnapshotOzoneKeyInfo(BucketLayout bucketLayout) throws IOException { + OMMetadataManager 
omMetadataManager = mock(OMMetadataManager.class); + if (bucketLayout.isFileSystemOptimized()) { + when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + } else { + when(omMetadataManager.getOzoneKey(anyString(), anyString(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + } + when(omMetadataManager.getRenameKey(anyString(), anyString(), anyLong())).thenAnswer( + i -> getRenameKey(i.getArgument(0), i.getArgument(1), i.getArgument(2))); + + OMMetadataManager previousMetadataManager = mock(OMMetadataManager.class); + OzoneConfiguration configuration = new OzoneConfiguration(); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + KeyManagerImpl prevKM = new KeyManagerImpl(null, null, previousMetadataManager, configuration, null, null, null); + long volumeId = 1L; + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setBucketName(BUCKET_NAME).setVolumeName(VOLUME_NAME) + .setObjectID(2L).setBucketLayout(bucketLayout).build(); + OmKeyInfo prevKey = getMockedOmKeyInfo(bucketInfo, 5, "key", 1); + OmKeyInfo prevKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); + OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmKeyInfo currentKey3 = getMockedOmKeyInfo(bucketInfo, 8, "key3", 3); + OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4); + Table prevKeyTable = + getMockedTable(ImmutableMap.of( + getPath(volumeId, bucketInfo, prevKey), prevKey, + getPath(volumeId, bucketInfo, prevKey2), prevKey2)); + Table renameTable = getMockedTable( + ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), getPath(volumeId, bucketInfo, prevKey), + getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), 
getPath(volumeId, bucketInfo, + getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); + when(previousMetadataManager.getKeyTable(eq(bucketLayout))).thenReturn(prevKeyTable); + when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable); + assertEquals(prevKey, km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneKeyInfo(volumeId, bucketInfo, currentKey4).apply(prevKM)); + } + + @Test + public void testPreviousSnapshotOzoneDirInfo() throws IOException { + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + when(omMetadataManager.getRenameKey(anyString(), anyString(), anyLong())).thenAnswer( + i -> getRenameKey(i.getArgument(0), i.getArgument(1), i.getArgument(2))); + + OMMetadataManager previousMetadataManager = mock(OMMetadataManager.class); + OzoneConfiguration configuration = new OzoneConfiguration(); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + KeyManagerImpl prevKM = new KeyManagerImpl(null, null, previousMetadataManager, configuration, null, null, null); + long volumeId = 1L; + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setBucketName(BUCKET_NAME).setVolumeName(VOLUME_NAME) + .setObjectID(2L).setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build(); + OmDirectoryInfo prevKey = getMockedOmDirInfo(5, "key", 1); + OmDirectoryInfo prevKey2 = getMockedOmDirInfo(7, "key2", 2); + OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); + OmDirectoryInfo currentKeyDir = 
getMockedOmDirInfo(6, "renamedKey", 1); + OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmDirectoryInfo currentKeyDir2 = getMockedOmDirInfo(7, "key2", 2); + OmKeyInfo currentKey3 = getMockedOmKeyInfo(bucketInfo, 8, "key3", 3); + OmDirectoryInfo currentKeyDir3 = getMockedOmDirInfo(8, "key3", 3); + OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4); + OmDirectoryInfo currentKeyDir4 = getMockedOmDirInfo(8, "key4", 4); + Table prevDirTable = + getMockedTable(ImmutableMap.of( + getPath(volumeId, bucketInfo, prevKey), prevKey, + getPath(volumeId, bucketInfo, prevKey2), prevKey2)); + Table renameTable = getMockedTable( + ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), getPath(volumeId, bucketInfo, prevKey), + getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getPath(volumeId, bucketInfo, + getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); + when(previousMetadataManager.getDirectoryTable()).thenReturn(prevDirTable); + when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable); + assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey4).apply(prevKM)); + + assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir4).apply(prevKM)); + } + private void initKeyTableForMultipartTest(String keyName, String volume) throws IOException 
{ List locationInfoGroups = new ArrayList<>(); List locationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 33cbf84e85d8..65fdddeb1320 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -737,7 +737,7 @@ public CheckedFunction getPreviousSnap long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo keyInfo) throws IOException { String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), keyInfo.getParentObjectID(), keyInfo.getName()); - return getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, keyInfo.getObjectID(), currentKeyPath, + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, (km) -> km.getMetadataManager().getDirectoryTable()); } @@ -746,8 +746,8 @@ public CheckedFunction getPreviousSnap long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException { String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), keyInfo.getParentObjectID(), keyInfo.getFileName()); - return getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, keyInfo.getObjectID(), currentKeyPath, - (km) -> km.getMetadataManager().getDirectoryTable()); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (previousSnapshotKM) -> previousSnapshotKM.getMetadataManager().getDirectoryTable()); } @Override @@ -757,18 +757,18 @@ public CheckedFunction getPreviousSnapshotOz ? 
metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), keyInfo.getParentObjectID(), keyInfo.getFileName()) : metadataManager.getOzoneKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), keyInfo.getKeyName()); - return getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, keyInfo.getObjectID(), currentKeyPath, - (km) -> km.getMetadataManager().getKeyTable(bucketInfo.getBucketLayout())); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (previousSnapshotKM) -> previousSnapshotKM.getMetadataManager().getKeyTable(bucketInfo.getBucketLayout())); } - private CheckedFunction getPreviousSnapshotOzoneDirInfo( - long volumeId, OmBucketInfo bucketInfo, long objectId, String currentKeyPath, + private CheckedFunction getPreviousSnapshotOzonePathInfo( + OmBucketInfo bucketInfo, long objectId, String currentKeyPath, Function> table) throws IOException { String renameKey = metadataManager.getRenameKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), objectId); String renamedKey = metadataManager.getSnapshotRenamedTable().getIfExist(renameKey); - - return (km) -> table.apply(km).get(renamedKey == null ? renameKey : currentKeyPath); + return (previousSnapshotKM) -> table.apply(previousSnapshotKM).get( + renamedKey != null ? 
renamedKey : currentKeyPath); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java index cfb13b227a27..93ea35465763 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java @@ -52,7 +52,9 @@ protected Boolean isReclaimable(Table.KeyValue renameEntry) thro Table prevDirTable = null; if (previousSnapshot != null) { previousKeyTable = previousSnapshot.get().getMetadataManager().getKeyTable(getBucketInfo().getBucketLayout()); - prevDirTable = previousSnapshot.get().getMetadataManager().getDirectoryTable(); + if (getBucketInfo().getBucketLayout().isFileSystemOptimized()) { + prevDirTable = previousSnapshot.get().getMetadataManager().getDirectoryTable(); + } } return isRenameEntryReclaimable(renameEntry, prevDirTable, previousKeyTable); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java new file mode 100644 index 000000000000..0a65b5919b9d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java @@ -0,0 +1,320 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; +import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; 
+import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.DBOptions; +import org.rocksdb.ReadOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; + +/** + * Test class for ReclaimableFilter. 
+ */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestAbstractReclaimableFilter { + + private ReclaimableFilter reclaimableFilter; + private OzoneManager ozoneManager; + private OmSnapshotManager omSnapshotManager; + private AtomicReference> lockIds = new AtomicReference<>(Collections.emptyList()); + private List volumes; + private List buckets; + private MockedStatic mockedSnapshotUtils; + private Map> snapshotInfos; + @TempDir + private Path testDir; + private SnapshotChainManager snapshotChainManager; + private KeyManager keyManager; + + protected abstract ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, + IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain); + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume) throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info, BucketLayout.FILE_SYSTEM_OPTIMIZED); + } + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, BucketLayout bucketLayout) + throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info, bucketLayout); + } + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, Function snapshotProps, + BucketLayout bucketLayout) throws IOException, RocksDBException { + this.ozoneManager = mock(OzoneManager.class); + 
this.snapshotChainManager = mock(SnapshotChainManager.class); + this.keyManager = mock(KeyManager.class); + IOzoneManagerLock ozoneManagerLock = mock(IOzoneManagerLock.class); + when(ozoneManagerLock.acquireReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + lockIds.set( + (List) i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + }); + when(ozoneManagerLock.releaseReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + Assertions.assertEquals(lockIds.get(), + i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + lockIds.set(Collections.emptyList()); + return OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + }); + snapshotInfos = mockSnapshotChain(actualTotalNumberOfSnapshotsInChain, + ozoneManager, snapshotChainManager, numberOfVolumes, numberOfBucketsPerVolume, snapshotProps); + mockOzoneManager(bucketLayout); + mockOmSnapshotManager(ozoneManager); + SnapshotInfo info = index >= actualTotalNumberOfSnapshotsInChain ? 
null : + snapshotInfos.get(getKey(volumes.get(volumes.size() - 1), buckets.get(buckets.size() - 1))).get(index); + this.reclaimableFilter = Mockito.spy(initializeFilter(ozoneManager, omSnapshotManager, snapshotChainManager, + info, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain)); + return info; + } + + @AfterEach + protected void teardown() throws IOException { + this.mockedSnapshotUtils.close(); + this.reclaimableFilter.close(); + } + + private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); + long volumeCount = 0; + long bucketCount = 0; + for (String volume : volumes) { + when(metadataManager.getVolumeId(eq(volume))).thenReturn(volumeCount); + for (String bucket : buckets) { + when(ozoneManager.getBucketInfo(eq(volume), eq(bucket))) + .thenReturn(OmBucketInfo.newBuilder().setVolumeName(volume).setBucketName(bucket) + .setObjectID(bucketCount).setBucketLayout(bucketLayout).build()); + bucketCount++; + } + volumeCount++; + } + } + + private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { + try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); + MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, + (mock, context) -> { + Map> map = new HashMap<>(); + when(mock.get(any(UUID.class))).thenAnswer(i -> { + if (snapshotInfos.values().stream().flatMap(List::stream) + .map(SnapshotInfo::getSnapshotId) + .noneMatch(id -> id.equals(i.getArgument(0, UUID.class)))) { + throw new IOException("Snapshot " + i.getArgument(0, UUID.class) + " not found"); + } + return map.computeIfAbsent(i.getArgument(0, UUID.class), (k) -> { + ReferenceCounted ref = mock(ReferenceCounted.class); + OmSnapshot omSnapshot = mock(OmSnapshot.class); + when(omSnapshot.getSnapshotID()).thenReturn(k); + when(ref.get()).thenReturn(omSnapshot); + return 
ref; + }); + }); + })) { + ManagedRocksDB managedRocksDB = mock(ManagedRocksDB.class); + RocksDB rocksDB = mock(RocksDB.class); + rocksdb.when(() -> ManagedRocksDB.open(any(DBOptions.class), anyString(), anyList(), anyList())) + .thenReturn(managedRocksDB); + RocksIterator emptyRocksIterator = mock(RocksIterator.class); + when(emptyRocksIterator.isValid()).thenReturn(false); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class))).thenReturn(emptyRocksIterator); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class))).thenReturn(emptyRocksIterator); + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + DBStore dbStore = mock(RDBStore.class); + when(metadataManager.getStore()).thenReturn(dbStore); + when(dbStore.getRocksDBCheckpointDiffer()).thenReturn(Mockito.mock(RocksDBCheckpointDiffer.class)); + Table mockedTransactionTable = Mockito.mock(Table.class); + when(metadataManager.getTransactionInfoTable()).thenReturn(mockedTransactionTable); + when(mockedTransactionTable.getSkipCache(eq(TRANSACTION_INFO_KEY))) + .thenReturn(TransactionInfo.valueOf(0, 10)); + when(managedRocksDB.get()).thenReturn(rocksDB); + + when(rocksDB.createColumnFamily(any(ColumnFamilyDescriptor.class))) + .thenAnswer(i -> { + ColumnFamilyDescriptor descriptor = i.getArgument(0, ColumnFamilyDescriptor.class); + ColumnFamilyHandle ch = Mockito.mock(ColumnFamilyHandle.class); + when(ch.getName()).thenReturn(descriptor.getName()); + return ch; + }); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); + when(om.getConfiguration()).thenReturn(conf); + when(om.isFilesystemSnapshotEnabled()).thenReturn(true); + this.omSnapshotManager = new OmSnapshotManager(om); + } + } + + protected List getLastSnapshotInfos(String volume, String bucket, int numberOfSnapshotsInChain, + int index) { + List infos = getSnapshotInfos().get(getKey(volume, bucket)); + int endIndex = 
Math.min(index - 1, infos.size() - 1); + return IntStream.range(endIndex - numberOfSnapshotsInChain + 1, endIndex + 1).mapToObj(i -> i >= 0 ? + infos.get(i) : null).collect(Collectors.toList()); + } + + private Map> mockSnapshotChain( + int numberOfSnaphotsInChain, OzoneManager om, SnapshotChainManager chainManager, int numberOfVolumes, + int numberOfBuckets, Function snapshotInfoProp) { + volumes = IntStream.range(0, numberOfVolumes).mapToObj(i -> "volume" + i).collect(Collectors.toList()); + buckets = IntStream.range(0, numberOfBuckets).mapToObj(i -> "bucket" + i).collect(Collectors.toList()); + Map> bucketSnapshotMap = new HashMap<>(); + for (String volume : volumes) { + for (String bucket : buckets) { + bucketSnapshotMap.computeIfAbsent(getKey(volume, bucket), (k) -> new ArrayList<>()); + } + } + mockedSnapshotUtils = mockStatic(SnapshotUtils.class, CALLS_REAL_METHODS); + for (int i = 0; i < numberOfSnaphotsInChain; i++) { + for (String volume : volumes) { + for (String bucket : buckets) { + SnapshotInfo snapshotInfo = snapshotInfoProp.apply(SnapshotInfo.newInstance(volume, bucket, + "snap" + i, UUID.randomUUID(), 0)); + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), + eq(snapshotInfo.getTableKey()))).thenReturn(snapshotInfo); + mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(om), eq(chainManager), + eq(snapshotInfo))).thenReturn(infos.isEmpty() ? null : infos.get(infos.size() - 1)); + infos.add(snapshotInfo); + } + } + } + + for (String volume : volumes) { + for (String bucket : buckets) { + mockedSnapshotUtils.when(() -> SnapshotUtils.getLatestSnapshotInfo( + eq(volume), eq(bucket), eq(om), eq(chainManager))) + .thenAnswer(i -> { + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + return infos.isEmpty() ? 
null : infos.get(infos.size() - 1); + }); + } + } + return bucketSnapshotMap; + } + + public static String getKey(String volume, String bucket) { + return volume + "/" + bucket; + } + + public Map> getSnapshotInfos() { + return snapshotInfos; + } + + public SnapshotChainManager getSnapshotChainManager() { + return snapshotChainManager; + } + + public ReclaimableFilter getReclaimableFilter() { + return reclaimableFilter; + } + + public AtomicReference> getLockIds() { + return lockIds; + } + + public List getBuckets() { + return buckets; + } + + public List getVolumes() { + return volumes; + } + + public OzoneManager getOzoneManager() { + return ozoneManager; + } + + public MockedStatic getMockedSnapshotUtils() { + return mockedSnapshotUtils; + } + + public OmSnapshotManager getOmSnapshotManager() { + return omSnapshotManager; + } + + public KeyManager getKeyManager() { + return keyManager; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java new file mode 100644 index 000000000000..590d96e165db --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableDirFilter. 
+ */ +public class TestReclaimableDirFilter extends TestAbstractReclaimableFilter { + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo, + KeyManager km, IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableDirFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock); + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + arguments.add(Arguments.of(i, j)); + } + } + return arguments; + } + + private void testReclaimableDirFilter(String volume, String bucket, int index, + OmKeyInfo dirInfo, OmDirectoryInfo prevDirInfo, + Boolean expectedValue) + throws IOException { + List snapshotInfos = getLastSnapshotInfos(volume, bucket, 1, index); + SnapshotInfo prevSnapshotInfo = snapshotInfos.get(0); + OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); + long volumeId = getOzoneManager().getMetadataManager().getVolumeId(volume); + KeyManager keyManager = getKeyManager(); + if (prevSnapshotInfo != null) { + ReferenceCounted prevSnap = Optional.ofNullable(prevSnapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + mockOmSnapshot(prevSnap); + when(keyManager.getPreviousSnapshotOzoneDirInfo(eq(volumeId), eq(bucketInfo), eq(dirInfo))) + .thenReturn((km) -> prevDirInfo); + } + + when(dirInfo.getVolumeName()).thenReturn(volume); + when(dirInfo.getBucketName()).thenReturn(bucket); + assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue("key", dirInfo))); + } + + private OmKeyInfo getMockedOmKeyInfo(long objectId) { + OmKeyInfo keyInfo = mock(OmKeyInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + 
return keyInfo; + } + + private OmDirectoryInfo getMockedOmDirInfo(long objectId) { + OmDirectoryInfo keyInfo = mock(OmDirectoryInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + return keyInfo; + } + + private KeyManager mockOmSnapshot(ReferenceCounted snapshot) { + if (snapshot != null) { + OmSnapshot omSnapshot = snapshot.get(); + KeyManager keyManager = mock(KeyManager.class); + when(omSnapshot.getKeyManager()).thenReturn(keyManager); + return keyManager; + } + return null; + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableDirectory(int actualNumberOfSnapshots, int index) throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo dirInfo = getMockedOmKeyInfo(1); + OmDirectoryInfo prevDirectoryInfo = index - 1 >= 0 ? getMockedOmDirInfo(1) : null; + testReclaimableDirFilter(volume, bucket, index, dirInfo, prevDirectoryInfo, prevDirectoryInfo == null); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentObjId(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo dirInfo = getMockedOmKeyInfo(1); + OmDirectoryInfo prevDirectoryInfo = index - 1 >= 0 ? 
getMockedOmDirInfo(2) : null; + testReclaimableDirFilter(volume, bucket, index, dirInfo, prevDirectoryInfo, true); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index 687f484a74b2..a9a07ec9d446 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -17,88 +17,46 @@ package org.apache.hadoop.ozone.om.snapshot.filter; -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyList; -import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; import java.io.IOException; import java.io.UncheckedIOException; -import java.nio.file.Path; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.RDBStore; import 
org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.KeyManager; -import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.OMLockDetails; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import org.mockito.MockedConstruction; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.DBOptions; -import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; -import org.rocksdb.RocksIterator; /** - * Test class for ReclaimableFilter. + * Test class for ReclaimableFilter testing general initializing of snapshot chain. 
*/ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -public class TestReclaimableFilter { - - private ReclaimableFilter reclaimableFilter; - private OzoneManager ozoneManager; - private OmSnapshotManager omSnapshotManager; - private AtomicReference> lockIds = new AtomicReference<>(Collections.emptyList()); - private List volumes; - private List buckets; - private MockedStatic mockedSnapshotUtils; - private Map> snapshotInfos; - @TempDir - private Path testDir; - private SnapshotChainManager snapshotChainManager; +public class TestReclaimableFilter extends TestAbstractReclaimableFilter { protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, SnapshotChainManager chainManager, @@ -123,142 +81,9 @@ protected Boolean isReclaimable(Table.KeyValue keyValue) throws }; } - public SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, - int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, - int numberOfBucketsPerVolume) throws RocksDBException, IOException { - return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, - numberOfBucketsPerVolume, (info) -> info); - } - - public SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, - int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, - int numberOfBucketsPerVolume, Function snapshotProps) - throws IOException, RocksDBException { - this.ozoneManager = mock(OzoneManager.class); - this.snapshotChainManager = mock(SnapshotChainManager.class); - KeyManager keyManager = mock(KeyManager.class); - IOzoneManagerLock ozoneManagerLock = mock(IOzoneManagerLock.class); - when(ozoneManagerLock.acquireReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) - .thenAnswer(i -> { - lockIds.set( - (List) i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) - .collect(Collectors.toList())); - return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; - }); - 
when(ozoneManagerLock.releaseReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) - .thenAnswer(i -> { - Assertions.assertEquals(lockIds.get(), - i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) - .collect(Collectors.toList())); - lockIds.set(Collections.emptyList()); - return OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; - }); - snapshotInfos = mockSnapshotChain(actualTotalNumberOfSnapshotsInChain, - ozoneManager, snapshotChainManager, numberOfVolumes, numberOfBucketsPerVolume, snapshotProps); - mockOmSnapshotManager(ozoneManager); - SnapshotInfo info = index >= actualTotalNumberOfSnapshotsInChain ? null : - snapshotInfos.get(getKey(volumes.get(volumes.size() - 1), buckets.get(buckets.size() - 1))).get(index); - this.reclaimableFilter = Mockito.spy(initializeFilter(ozoneManager, omSnapshotManager, snapshotChainManager, - info, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain)); - return info; - } - @AfterEach - public void teardown() throws IOException { - this.mockedSnapshotUtils.close(); - this.reclaimableFilter.close(); - } - - private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { - try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); - MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, - (mock, context) -> { - when(mock.get(any(UUID.class))).thenAnswer(i -> { - if (snapshotInfos.values().stream().flatMap(List::stream) - .map(SnapshotInfo::getSnapshotId) - .noneMatch(id -> id.equals(i.getArgument(0, UUID.class)))) { - throw new IOException("Snapshot " + i.getArgument(0, UUID.class) + " not found"); - } - ReferenceCounted referenceCounted = mock(ReferenceCounted.class); - OmSnapshot omSnapshot = mock(OmSnapshot.class); - when(omSnapshot.getSnapshotID()).thenReturn(i.getArgument(0, UUID.class)); - when(referenceCounted.get()).thenReturn(omSnapshot); - return referenceCounted; - }); - })) { - ManagedRocksDB 
managedRocksDB = mock(ManagedRocksDB.class); - RocksDB rocksDB = mock(RocksDB.class); - rocksdb.when(() -> ManagedRocksDB.open(any(DBOptions.class), anyString(), anyList(), anyList())) - .thenReturn(managedRocksDB); - RocksIterator emptyRocksIterator = mock(RocksIterator.class); - when(emptyRocksIterator.isValid()).thenReturn(false); - when(rocksDB.newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class))).thenReturn(emptyRocksIterator); - when(rocksDB.newIterator(any(ColumnFamilyHandle.class))).thenReturn(emptyRocksIterator); - OMMetadataManager metadataManager = mock(OMMetadataManager.class); - DBStore dbStore = mock(RDBStore.class); - when(metadataManager.getStore()).thenReturn(dbStore); - when(dbStore.getRocksDBCheckpointDiffer()).thenReturn(Mockito.mock(RocksDBCheckpointDiffer.class)); - when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); - Table mockedTransactionTable = Mockito.mock(Table.class); - when(metadataManager.getTransactionInfoTable()).thenReturn(mockedTransactionTable); - when(mockedTransactionTable.getSkipCache(eq(TRANSACTION_INFO_KEY))) - .thenReturn(TransactionInfo.valueOf(0, 10)); - when(managedRocksDB.get()).thenReturn(rocksDB); - - when(rocksDB.createColumnFamily(any(ColumnFamilyDescriptor.class))) - .thenAnswer(i -> { - ColumnFamilyDescriptor descriptor = i.getArgument(0, ColumnFamilyDescriptor.class); - ColumnFamilyHandle ch = Mockito.mock(ColumnFamilyHandle.class); - when(ch.getName()).thenReturn(descriptor.getName()); - return ch; - }); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); - when(om.getConfiguration()).thenReturn(conf); - when(om.isFilesystemSnapshotEnabled()).thenReturn(true); - this.omSnapshotManager = new OmSnapshotManager(om); - } - } - - private Map> mockSnapshotChain( - int numberOfSnaphotsInChain, OzoneManager om, SnapshotChainManager chainManager, int numberOfVolumes, - int numberOfBuckets, Function 
snapshotInfoProp) { - volumes = IntStream.range(0, numberOfVolumes).mapToObj(i -> "volume" + i).collect(Collectors.toList()); - buckets = IntStream.range(0, numberOfBuckets).mapToObj(i -> "bucket" + i).collect(Collectors.toList()); - Map> bucketSnapshotMap = new HashMap<>(); - for (String volume : volumes) { - for (String bucket : buckets) { - bucketSnapshotMap.computeIfAbsent(getKey(volume, bucket), (k) -> new ArrayList<>()); - } - } - mockedSnapshotUtils = mockStatic(SnapshotUtils.class, CALLS_REAL_METHODS); - for (int i = 0; i < numberOfSnaphotsInChain; i++) { - for (String volume : volumes) { - for (String bucket : buckets) { - SnapshotInfo snapshotInfo = snapshotInfoProp.apply(SnapshotInfo.newInstance(volume, bucket, - "snap" + i, UUID.randomUUID(), 0)); - List infos = bucketSnapshotMap.get(getKey(volume, bucket)); - mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), - eq(snapshotInfo.getTableKey()))).thenReturn(snapshotInfo); - mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(om), eq(chainManager), - eq(snapshotInfo))).thenReturn(infos.isEmpty() ? null : infos.get(infos.size() - 1)); - infos.add(snapshotInfo); - } - } - } - - for (String volume : volumes) { - for (String bucket : buckets) { - mockedSnapshotUtils.when(() -> SnapshotUtils.getLatestSnapshotInfo( - eq(volume), eq(bucket), eq(om), eq(chainManager))) - .thenAnswer(i -> { - List infos = bucketSnapshotMap.get(getKey(volume, bucket)); - return infos.isEmpty() ? 
null : infos.get(infos.size() - 1); - }); - } - } - return bucketSnapshotMap; - + protected void teardown() throws IOException { + super.teardown(); } List testReclaimableFilterArguments() { @@ -273,28 +98,21 @@ List testReclaimableFilterArguments() { return arguments; } - private List getLastSnapshotInfos(String volume, String bucket, int numberOfSnapshotsInChain, - int index) { - List infos = snapshotInfos.get(getKey(volume, bucket)); - int endIndex = Math.min(index - 1, infos.size() - 1); - return IntStream.range(endIndex - numberOfSnapshotsInChain + 1, endIndex + 1).mapToObj(i -> i >= 0 ? - infos.get(i) : null).collect(Collectors.toList()); - } - private void testSnapshotInitAndLocking(String volume, String bucket, int numberOfPreviousSnapshotsFromChain, int index, SnapshotInfo currentSnapshotInfo, Boolean reclaimable, Boolean expectedReturnValue) throws IOException { List infos = getLastSnapshotInfos(volume, bucket, numberOfPreviousSnapshotsFromChain, index); - assertEquals(expectedReturnValue, reclaimableFilter.apply(Table.newKeyValue(getKey(volume, bucket), reclaimable))); - Assertions.assertEquals(infos, reclaimableFilter.getPreviousSnapshotInfos()); - Assertions.assertEquals(infos.size(), reclaimableFilter.getPreviousOmSnapshots().size()); + assertEquals(expectedReturnValue, + getReclaimableFilter().apply(Table.newKeyValue(getKey(volume, bucket), reclaimable))); + Assertions.assertEquals(infos, getReclaimableFilter().getPreviousSnapshotInfos()); + Assertions.assertEquals(infos.size(), getReclaimableFilter().getPreviousOmSnapshots().size()); Assertions.assertEquals(infos.stream().map(si -> si == null ? null : si.getSnapshotId()) - .collect(Collectors.toList()), reclaimableFilter.getPreviousOmSnapshots().stream() + .collect(Collectors.toList()), getReclaimableFilter().getPreviousOmSnapshots().stream() .map(i -> i == null ? 
null : ((ReferenceCounted) i).get().getSnapshotID()) .collect(Collectors.toList())); infos.add(currentSnapshotInfo); Assertions.assertEquals(infos.stream().filter(Objects::nonNull).map(SnapshotInfo::getSnapshotId).collect( - Collectors.toList()), lockIds.get()); + Collectors.toList()), getLockIds().get()); } @ParameterizedTest @@ -304,8 +122,8 @@ public void testReclaimableFilterSnapshotChainInitilization(int numberOfPrevious int index) throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 2); - String volume = volumes.get(3); - String bucket = buckets.get(1); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); testSnapshotInitAndLocking(volume, bucket, numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true); testSnapshotInitAndLocking(volume, bucket, numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, false, @@ -319,8 +137,8 @@ public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSn int index) throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); - AtomicReference volume = new AtomicReference<>(volumes.get(2)); - AtomicReference bucket = new AtomicReference<>(buckets.get(3)); + AtomicReference volume = new AtomicReference<>(getVolumes().get(2)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(3)); if (currentSnapshotInfo == null) { testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true); @@ -334,8 +152,8 @@ public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSn + currentSnapshotInfo + " not matching for key in volume: " + volume + " bucket: " + bucket, ex.getMessage()); } - volume.set(volumes.get(3)); - bucket.set(buckets.get(2)); + volume.set(getVolumes().get(3)); + 
bucket.set(getBuckets().get(2)); if (currentSnapshotInfo == null) { testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true); @@ -359,22 +177,24 @@ public void testReclaimabilityOnSnapshotAddition(int numberOfPreviousSnapshotsFr SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); - AtomicReference volume = new AtomicReference<>(volumes.get(3)); - AtomicReference bucket = new AtomicReference<>(buckets.get(3)); + AtomicReference volume = new AtomicReference<>(getVolumes().get(3)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(3)); - when(reclaimableFilter.isReclaimable(any(Table.KeyValue.class))).thenAnswer(i -> { + when(getReclaimableFilter().isReclaimable(any(Table.KeyValue.class))).thenAnswer(i -> { if (i.getArgument(0) == null) { return null; } SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volume.get(), bucket.get(), "snap" + actualNumberOfSnapshots, UUID.randomUUID(), 0); - SnapshotInfo prevSnapshot = SnapshotUtils.getLatestSnapshotInfo(volume.get(), bucket.get(), ozoneManager, - snapshotChainManager); - mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), eq(snapshotInfo.getTableKey()))) + SnapshotInfo prevSnapshot = SnapshotUtils.getLatestSnapshotInfo(volume.get(), bucket.get(), getOzoneManager(), + getSnapshotChainManager()); + getMockedSnapshotUtils().when( + () -> SnapshotUtils.getSnapshotInfo(eq(getOzoneManager()), eq(snapshotInfo.getTableKey()))) .thenReturn(snapshotInfo); - mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(ozoneManager), eq(this.snapshotChainManager), - eq(snapshotInfo))).thenReturn(prevSnapshot); - snapshotInfos.get(getKey(volume.get(), bucket.get())).add(snapshotInfo); + getMockedSnapshotUtils().when( + () -> SnapshotUtils.getPreviousSnapshot(eq(getOzoneManager()), eq(getSnapshotChainManager()), + 
eq(snapshotInfo))).thenReturn(prevSnapshot); + getSnapshotInfos().get(getKey(volume.get(), bucket.get())).add(snapshotInfo); return i.callRealMethod(); }); @@ -406,15 +226,16 @@ public void testInitWithInactiveSnapshots(int numberOfPreviousSnapshotsFromChain int snapIndex) throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 1, 1, (snapshotInfo) -> { - if (snapshotInfo.getVolumeName().equals(volumes.get(0)) && snapshotInfo.getBucketName().equals(buckets.get(0)) + if (snapshotInfo.getVolumeName().equals(getVolumes().get(0)) && + snapshotInfo.getBucketName().equals(getBuckets().get(0)) && snapshotInfo.getName().equals("snap" + snapIndex)) { snapshotInfo.setSnapshotStatus(SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); } return snapshotInfo; - }); + }, BucketLayout.FILE_SYSTEM_OPTIMIZED); - AtomicReference volume = new AtomicReference<>(volumes.get(0)); - AtomicReference bucket = new AtomicReference<>(buckets.get(0)); + AtomicReference volume = new AtomicReference<>(getVolumes().get(0)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(0)); int endIndex = Math.min(index - 1, actualNumberOfSnapshots - 1); int beginIndex = Math.max(0, endIndex - numberOfPreviousSnapshotsFromChain + 1); if (snapIndex < beginIndex || snapIndex > endIndex) { @@ -440,7 +261,8 @@ public void testInitWithUnflushedSnapshots(int numberOfPreviousSnapshotsFromChai int snapIndex) throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4, (snapshotInfo) -> { - if (snapshotInfo.getVolumeName().equals(volumes.get(3)) && snapshotInfo.getBucketName().equals(buckets.get(3)) + if (snapshotInfo.getVolumeName().equals(getVolumes().get(3)) && + snapshotInfo.getBucketName().equals(getBuckets().get(3)) && snapshotInfo.getName().equals("snap" + snapIndex)) { try { 
snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(0, 11).toByteString()); @@ -449,10 +271,10 @@ public void testInitWithUnflushedSnapshots(int numberOfPreviousSnapshotsFromChai } } return snapshotInfo; - }); + }, BucketLayout.FILE_SYSTEM_OPTIMIZED); - AtomicReference volume = new AtomicReference<>(volumes.get(3)); - AtomicReference bucket = new AtomicReference<>(buckets.get(3)); + AtomicReference volume = new AtomicReference<>(getVolumes().get(3)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(3)); int endIndex = Math.min(index - 1, actualNumberOfSnapshots - 1); int beginIndex = Math.max(0, endIndex - numberOfPreviousSnapshotsFromChain + 1); if (snapIndex < beginIndex || snapIndex > endIndex) { @@ -465,11 +287,7 @@ public void testInitWithUnflushedSnapshots(int numberOfPreviousSnapshotsFromChai testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true)); assertEquals(String.format("Changes made to the snapshot %s have not been flushed to the disk ", - snapshotInfos.get(getKey(volume.get(), bucket.get())).get(snapIndex)), ex.getMessage()); + getSnapshotInfos().get(getKey(volume.get(), bucket.get())).get(snapIndex)), ex.getMessage()); } } - - public static String getKey(String volume, String bucket) { - return volume + "/" + bucket; - } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java new file mode 100644 index 000000000000..88cdbeca5a54 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableKeyFilter.java @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import 
org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableKeyFilter. + */ +public class TestReclaimableKeyFilter extends TestAbstractReclaimableFilter { + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo, + KeyManager km, IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableKeyFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock); + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + arguments.add(Arguments.of(i, j)); + } + } + return arguments; + } + + private KeyManager mockOmSnapshot(ReferenceCounted snapshot) { + if (snapshot != null) { + OmSnapshot omSnapshot = snapshot.get(); + KeyManager keyManager = mock(KeyManager.class); + when(omSnapshot.getKeyManager()).thenReturn(keyManager); + return keyManager; + } + return null; + } + + @SuppressWarnings("checkstyle:ParameterNumber") + private void testReclaimableKeyFilter(String volume, String bucket, int index, + OmKeyInfo keyInfo, OmKeyInfo prevKeyInfo, OmKeyInfo prevPrevKeyInfo, + Boolean expectedValue, + Optional size, Optional replicatedSize) + throws IOException { + List snapshotInfos = getLastSnapshotInfos(volume, bucket, 2, index); + SnapshotInfo previousToPreviousSapshotInfo = snapshotInfos.get(0); + SnapshotInfo prevSnapshotInfo = snapshotInfos.get(1); + OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); + long volumeId = getOzoneManager().getMetadataManager().getVolumeId(volume); + + ReferenceCounted prevSnap = Optional.ofNullable(prevSnapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + ReferenceCounted prevToPrevSnap = 
Optional.ofNullable(previousToPreviousSapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + + KeyManager keyManager = getKeyManager(); + KeyManager prevKeyManager = mockOmSnapshot(prevSnap); + KeyManager prevToPrevKeyManager = mockOmSnapshot(prevToPrevSnap); + if (prevKeyManager != null) { + when(keyManager.getPreviousSnapshotOzoneKeyInfo(eq(volumeId), + eq(bucketInfo), eq(keyInfo))) + .thenReturn((km) -> prevKeyInfo); + } + if (prevKeyInfo != null && prevKeyManager != null && prevToPrevKeyManager != null) { + when(prevKeyManager.getPreviousSnapshotOzoneKeyInfo(eq(volumeId), + eq(bucketInfo), eq(prevKeyInfo))).thenReturn((km) -> prevPrevKeyInfo); + } + when(keyInfo.getVolumeName()).thenReturn(volume); + when(keyInfo.getBucketName()).thenReturn(bucket); + assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue("key", keyInfo))); + ReclaimableKeyFilter keyFilter = (ReclaimableKeyFilter) getReclaimableFilter(); + if (prevSnap != null) { + assertEquals(size.map(AtomicLong::get).orElse(null), + keyFilter.getExclusiveSizeMap().get(prevSnap.get().getSnapshotID())); + assertEquals(replicatedSize.map(AtomicLong::get).orElse(null), + keyFilter.getExclusiveReplicatedSizeMap().get(prevSnap.get().getSnapshotID())); + } else { + assertTrue(keyFilter.getExclusiveReplicatedSizeMap().isEmpty()); + assertTrue(keyFilter.getExclusiveSizeMap().isEmpty()); + } + + } + + private OmKeyInfo getMockedOmKeyInfo(long objectId, long size, long replicatedSize) { + OmKeyInfo keyInfo = mock(OmKeyInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + when(keyInfo.getDataSize()).thenReturn(size); + when(keyInfo.getReplicatedSize()).thenReturn(replicatedSize); + return keyInfo; + } + + private OmKeyInfo getMockedOmKeyInfo(long objectId) { + return getMockedOmKeyInfo(objectId, 0, 0); + } + + @ParameterizedTest + 
@MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableKey(int actualNumberOfSnapshots, int index) throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(1) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? getMockedOmKeyInfo(3) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(true); + } + if (prevPrevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo), eq(prevKeyInfo))) + .thenReturn(true); + } + Optional size = Optional.ofNullable(prevKeyInfo).map(i -> new AtomicLong()); + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + prevKeyInfo == null, size, size); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentObjId(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(2) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? 
getMockedOmKeyInfo(3) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(true); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + true, Optional.empty(), Optional.empty()); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentBlockIds(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(1) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? getMockedOmKeyInfo(3) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(false); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + true, Optional.empty(), Optional.empty()); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testExclusiveSizeCalculationWithNonReclaimableKey(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(2, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo keyInfo = getMockedOmKeyInfo(1, 1, 4); + OmKeyInfo prevKeyInfo = index - 1 >= 0 ? getMockedOmKeyInfo(1, 2, 5) : null; + OmKeyInfo prevPrevKeyInfo = index - 2 >= 0 ? 
getMockedOmKeyInfo(1, 3, 6) : null; + if (prevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevKeyInfo), eq(keyInfo))) + .thenReturn(true); + } + if (prevPrevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo), eq(prevKeyInfo))) + .thenReturn(true); + } + + Optional size = Optional.ofNullable(prevKeyInfo) + .map(i -> prevPrevKeyInfo == null ? new AtomicLong(2) : null); + Optional replicatedSize = Optional.ofNullable(prevKeyInfo) + .map(i -> prevPrevKeyInfo == null ? new AtomicLong(5) : null); + + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + prevKeyInfo == null, size, replicatedSize); + if (prevPrevKeyInfo != null) { + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo), eq(prevKeyInfo))) + .thenReturn(false); + } + if (prevKeyInfo != null) { + size = Optional.of(size.orElse(new AtomicLong())); + replicatedSize = Optional.of(replicatedSize.orElse(new AtomicLong())); + size.get().addAndGet(2L); + replicatedSize.get().addAndGet(5L); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo, + prevKeyInfo == null, size, replicatedSize); + OmKeyInfo prevPrevKeyInfo1; + if (prevPrevKeyInfo != null) { + prevPrevKeyInfo1 = getMockedOmKeyInfo(2, 3, 4); + getMockedSnapshotUtils().when(() -> SnapshotUtils.isBlockLocationInfoSame(eq(prevPrevKeyInfo1), eq(prevKeyInfo))) + .thenReturn(true); + } else { + prevPrevKeyInfo1 = null; + } + + if (prevKeyInfo != null) { + size = Optional.of(size.orElse(new AtomicLong())); + replicatedSize = Optional.of(replicatedSize.orElse(new AtomicLong())); + size.get().addAndGet(2L); + replicatedSize.get().addAndGet(5L); + } + testReclaimableKeyFilter(volume, bucket, index, keyInfo, prevKeyInfo, prevPrevKeyInfo1, + prevKeyInfo == null, size, replicatedSize); + } +} diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java new file mode 100644 index 000000000000..50ba1acc4e48 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableRenameEntryFilter.java @@ -0,0 +1,208 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableDirFilter. 
+ */ +public class TestReclaimableRenameEntryFilter extends TestAbstractReclaimableFilter { + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo, + KeyManager km, IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableRenameEntryFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock); + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + arguments.add(Arguments.of(i, j)); + } + } + return arguments; + } + + private void testReclaimableRenameEntryFilter(String volume, String bucket, int index, + String value, + Table keyTable, + Table dirTable, + Boolean expectedValue) + throws IOException { + List snapshotInfos = getLastSnapshotInfos(volume, bucket, 1, index); + SnapshotInfo prevSnapshotInfo = snapshotInfos.get(0); + OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); + if (prevSnapshotInfo != null) { + ReferenceCounted prevSnap = Optional.ofNullable(prevSnapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + mockOmSnapshot(prevSnap, bucketInfo, keyTable, dirTable); + } + String key = bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName() + "/" + 1; + String[] keySplit = key.split("/"); + KeyManager km = getKeyManager(); + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(km.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.splitRenameKey(eq(key))).thenReturn(keySplit); + assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue(key, value))); + } + + private Table getMockedTable(Map map) throws IOException { + Table table = 
mock(Table.class); + when(table.get(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + when(table.getIfExist(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + return table; + } + + private Table getFailingMockedTable() throws IOException { + Table table = mock(Table.class); + when(table.get(anyString())).thenThrow(new IOException()); + when(table.getIfExist(anyString())).thenThrow(new IOException()); + return table; + } + + private KeyManager mockOmSnapshot(ReferenceCounted snapshot, + OmBucketInfo bucketInfo, Table keyTable, + Table dirTable) { + if (snapshot != null) { + OmSnapshot omSnapshot = snapshot.get(); + KeyManager keyManager = mock(KeyManager.class); + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(omSnapshot.getMetadataManager()).thenReturn(omMetadataManager); + when(keyManager.getMetadataManager()).thenReturn(omMetadataManager); + when(omMetadataManager.getKeyTable(eq(bucketInfo.getBucketLayout()))).thenReturn(keyTable); + when(omMetadataManager.getDirectoryTable()).thenReturn(dirTable); + return keyManager; + } + return null; + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableRenameEntryWithKeyNonFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.OBJECT_STORE); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(ImmutableMap.of(value, mock(OmKeyInfo.class))); + Table directoryTable = getFailingMockedTable(); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, index == 0); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableRenameEntryWithKeyNonFSO(int actualNumberOfSnapshots, int index) + throws 
IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.OBJECT_STORE); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(Collections.emptyMap()); + Table directoryTable = getFailingMockedTable(); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, true); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableRenameEntryWithFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(Collections.emptyMap()); + Table directoryTable = getMockedTable(Collections.emptyMap()); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, true); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableRenameEntryWithFileFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(ImmutableMap.of(value, mock(OmKeyInfo.class))); + Table directoryTable = getMockedTable(Collections.emptyMap()); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, index == 0); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + 
public void testNonReclaimableRenameEntryWithDirFSO(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + String value = UUID.randomUUID().toString(); + Table keyTable = getMockedTable(Collections.emptyMap()); + Table directoryTable = getMockedTable(ImmutableMap.of(value, mock(OmDirectoryInfo.class))); + testReclaimableRenameEntryFilter(volume, bucket, index, value, keyTable, directoryTable, index == 0); + } +}