From 7327b47cf0609b66f7ca7c2a4c8ca587758aa983 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 11 Mar 2025 15:22:22 -0700 Subject: [PATCH 01/24] HDDS-12559. Implement Bulk Ozone Locks for taking locks on multiple snapshots Change-Id: If1a980e5550dda6775c3af53b14a1eab71206a43 --- .../ozone/om/lock/IOzoneManagerLock.java | 13 ++ .../hadoop/ozone/om/lock/OmReadOnlyLock.java | 21 +++ .../ozone/om/lock/OzoneManagerLock.java | 120 +++++++++++++++- .../ozone/om/lock/TestOzoneManagerLock.java | 32 +++++ .../ozone/om/snapshot/MultiSnapshotLocks.java | 86 +++++++++++ .../om/snapshot/TestMultiSnapshotLocks.java | 136 ++++++++++++++++++ 6 files changed, 407 insertions(+), 1 deletion(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index fac864b2135a..6926b7d9bf23 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.lock; import com.google.common.annotations.VisibleForTesting; +import java.util.Collection; /** * Interface for OM Metadata locks. @@ -27,9 +28,15 @@ public interface IOzoneManagerLock { OMLockDetails acquireReadLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails acquireReadLocks(OzoneManagerLock.Resource resource, Collection resources); + + OMLockDetails acquireWriteLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails acquireWriteLocks(OzoneManagerLock.Resource resource, + Collection resources); + boolean acquireMultiUserLock(String firstUser, String secondUser); void releaseMultiUserLock(String firstUser, String secondUser); @@ -37,9 +44,15 @@ OMLockDetails acquireWriteLock(OzoneManagerLock.Resource resource, OMLockDetails releaseWriteLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails releaseWriteLocks(OzoneManagerLock.Resource resource, + Collection resources); + OMLockDetails releaseReadLock(OzoneManagerLock.Resource resource, String... resources); + OMLockDetails releaseReadLocks(OzoneManagerLock.Resource resource, + Collection resources); + @VisibleForTesting int getReadHoldCount(OzoneManagerLock.Resource resource, String... resources); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java index b1b4296cba7d..059536fe0a58 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OmReadOnlyLock.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.om.lock.OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; import static org.apache.hadoop.ozone.om.lock.OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; +import java.util.Collection; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource; /** @@ -34,12 +35,22 @@ public OMLockDetails acquireReadLock(Resource resource, String... 
resources) { return EMPTY_DETAILS_LOCK_ACQUIRED; } + @Override + public OMLockDetails acquireReadLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_ACQUIRED; + } + @Override public OMLockDetails acquireWriteLock(Resource resource, String... resources) { return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } + @Override + public OMLockDetails acquireWriteLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + @Override public boolean acquireMultiUserLock(String firstUser, String secondUser) { return false; @@ -56,11 +67,21 @@ public OMLockDetails releaseWriteLock(Resource resource, return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } + @Override + public OMLockDetails releaseWriteLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + @Override public OMLockDetails releaseReadLock(Resource resource, String... resources) { return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } + @Override + public OMLockDetails releaseReadLocks(Resource resource, Collection resources) { + return EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + @Override public int getReadHoldCount(Resource resource, String... resources) { return 0; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index ab33a4f0c24d..c91619379ea6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -27,14 +27,18 @@ import com.google.common.util.concurrent.Striped; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.EnumMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.CompositeKey; import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.ipc.ProcessingDetails.Timing; import org.apache.hadoop.ipc.Server; @@ -122,6 +126,12 @@ private Striped createStripeLock(Resource r, return SimpleStriped.readWriteLock(size, fair); } + private Iterable bulkGetLock(Resource resource, Collection keys) { + Striped striped = stripedLockByResource.get(resource); + return striped.bulkGet(keys.stream().filter(Objects::nonNull) + .map(CompositeKey::combineKeys).collect(Collectors.toList())); + } + private ReentrantReadWriteLock getLock(Resource resource, String... keys) { Striped striped = stripedLockByResource.get(resource); Object key = combineKeys(keys); @@ -150,6 +160,28 @@ public OMLockDetails acquireReadLock(Resource resource, String... keys) { return acquireLock(resource, true, keys); } + /** + * Acquire read locks on a list of resources. + * + * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same + * thread acquiring lock again is allowed. + * + * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread + * acquiring lock again is not allowed. + * + * Special Note for USER_LOCK: Single thread can acquire single user lock/ + * multi user lock. But not both at the same time. + * @param resource - Type of the resource. + * @param keys - A list of Resource names on which user want to acquire locks. 
+ * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + @Override + public OMLockDetails acquireReadLocks(Resource resource, Collection keys) { + return acquireLocks(resource, true, keys); + } + /** * Acquire write lock on resource. * @@ -172,8 +204,56 @@ public OMLockDetails acquireWriteLock(Resource resource, String... keys) { return acquireLock(resource, false, keys); } + /** + * Acquire write locks on a list of resources. + * + * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same + * thread acquiring lock again is allowed. + * + * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread + * acquiring lock again is not allowed. + * + * Special Note for USER_LOCK: Single thread can acquire single user lock/ + * multi user lock. But not both at the same time. + * @param resource - Type of the resource. + * @param keys - A list of Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + @Override + public OMLockDetails acquireWriteLocks(Resource resource, Collection keys) { + return acquireLocks(resource, false, keys); + } + + private OMLockDetails acquireLocks(Resource resource, boolean isReadLock, + Collection keys) { + omLockDetails.get().clear(); + if (!resource.canLock(lockSet.get())) { + String errorMessage = getErrorMessage(resource); + LOG.error(errorMessage); + throw new RuntimeException(errorMessage); + } + + long startWaitingTimeNanos = Time.monotonicNowNanos(); + + for (ReadWriteLock lock : bulkGetLock(resource, keys)) { + if (isReadLock) { + lock.readLock().lock(); + updateReadLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); + } else { + lock.writeLock().lock(); + updateWriteLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); + } + } + + lockSet.set(resource.setLock(lockSet.get())); + omLockDetails.get().setLockAcquired(true); + return omLockDetails.get(); + } + private OMLockDetails acquireLock(Resource resource, boolean isReadLock, - String... keys) { + String[] keys) { omLockDetails.get().clear(); if (!resource.canLock(lockSet.get())) { String errorMessage = getErrorMessage(resource); @@ -317,6 +397,11 @@ public OMLockDetails releaseWriteLock(Resource resource, String... keys) { return releaseLock(resource, false, keys); } + @Override + public OMLockDetails releaseWriteLocks(Resource resource, Collection keys) { + return releaseLocks(resource, false, keys); + } + /** * Release read lock on resource. * @param resource - Type of the resource. @@ -330,6 +415,19 @@ public OMLockDetails releaseReadLock(Resource resource, String... keys) { return releaseLock(resource, true, keys); } + /** + * Release read locks on a list of resources. + * @param resource - Type of the resource. + * @param keys - Resource names on which user want to acquire lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For remaining all resource only one param should + * be passed. + */ + @Override + public OMLockDetails releaseReadLocks(Resource resource, Collection keys) { + return releaseLocks(resource, true, keys); + } + private OMLockDetails releaseLock(Resource resource, boolean isReadLock, String... 
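// Illustrative usage sketch (not part of this patch): acquiring and releasing the new
// Collection-based bulk locks from a caller's point of view. The configuration object,
// the snapshot IDs and the choice of SNAPSHOT_LOCK as the resource are assumptions made
// purely for the example. Because bulkGetLock() resolves the stripes in a stable order,
// two threads locking overlapping key sets cannot deadlock on each other.
OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());
List<String[]> keys = Arrays.asList(
    new String[] {firstSnapshotId.toString()},
    new String[] {secondSnapshotId.toString()});
OMLockDetails details = lock.acquireWriteLocks(OzoneManagerLock.Resource.SNAPSHOT_LOCK, keys);
try {
  if (details.isLockAcquired()) {
    // Both snapshot keys are write-locked here; safe to mutate state guarded by them.
  }
} finally {
  lock.releaseWriteLocks(OzoneManagerLock.Resource.SNAPSHOT_LOCK, keys);
}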
keys) { omLockDetails.get().clear(); @@ -347,6 +445,26 @@ private OMLockDetails releaseLock(Resource resource, boolean isReadLock, return omLockDetails.get(); } + private OMLockDetails releaseLocks(Resource resource, boolean isReadLock, + Collection keys) { + omLockDetails.get().clear(); + Iterable locks = bulkGetLock(resource, keys); + + for (ReadWriteLock lock : locks) { + if (isReadLock) { + lock.readLock().unlock(); + updateReadUnlockMetrics(resource, (ReentrantReadWriteLock) lock); + } else { + boolean isWriteLocked = ((ReentrantReadWriteLock)lock).isWriteLockedByCurrentThread(); + lock.writeLock().unlock(); + updateWriteUnlockMetrics(resource, (ReentrantReadWriteLock) lock, isWriteLocked); + } + } + + lockSet.set(resource.clearLock(lockSet.get())); + return omLockDetails.get(); + } + private void updateReadUnlockMetrics(Resource resource, ReentrantReadWriteLock lock) { /* diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 4cd44cba4b1a..ca986a84639d 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -25,6 +25,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Stack; import java.util.UUID; @@ -287,6 +288,37 @@ void testLockResourceParallel() throws Exception { } + @Test + void testMultiLocksResourceParallel() throws Exception { + OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); + + for (Resource resource : Resource.values()) { + final List resourceName = Arrays.asList(generateResourceName(resource), + generateResourceName(resource), generateResourceName(resource)); + lock.acquireWriteLocks(resource, resourceName.subList(1, resourceName.size())); + + AtomicBoolean gotLock = new AtomicBoolean(false); + new Thread(() -> { + lock.acquireWriteLocks(resource, resourceName.subList(0, 2)); + gotLock.set(true); + lock.releaseWriteLocks(resource, resourceName.subList(0, 2)); + }).start(); + // Let's give some time for the new thread to run + Thread.sleep(100); + // Since the new thread is trying to get lock on same resource, + // it will wait. + assertFalse(gotLock.get()); + lock.releaseWriteLocks(resource, resourceName.subList(1, resourceName.size())); + // Since we have released the lock, the new thread should have the lock + // now. + // Let's give some time for the new thread to run + Thread.sleep(100); + assertTrue(gotLock.get()); + } + + } + + @Test void testMultiLockResourceParallel() throws Exception { OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java new file mode 100644 index 000000000000..9b8b0db2a698 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import com.google.common.annotations.VisibleForTesting; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.stream.Collectors; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; + +/** + * Class to take multiple locks on multiple snapshots. + */ +public class MultiSnapshotLocks { + private final List objectLocks; + private final IOzoneManagerLock lock; + private final OzoneManagerLock.Resource resource; + private final boolean writeLock; + private OMLockDetails lockDetails; + + public MultiSnapshotLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource resource, boolean writeLock) { + this.writeLock = writeLock; + this.resource = resource; + this.lock = lock; + this.objectLocks = new ArrayList<>(); + this.lockDetails = OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + } + + public OMLockDetails acquireLock(Collection ids) throws OMException { + if (!objectLocks.isEmpty()) { + throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : " + + objectLocks.stream().map(Arrays::toString).collect(Collectors.toList()), + OMException.ResultCodes.INTERNAL_ERROR); + } + List keys = + ids.stream().filter(Objects::nonNull).map(id -> new String[] {id.toString()}) + .collect(Collectors.toList()); + OMLockDetails omLockDetails = this.writeLock ? lock.acquireWriteLocks(resource, keys) : + lock.acquireReadLocks(resource, keys); + if (omLockDetails.isLockAcquired()) { + objectLocks.addAll(keys); + } + this.lockDetails = omLockDetails; + return omLockDetails; + } + + public void releaseLock() { + if (this.writeLock) { + lockDetails = lock.releaseWriteLocks(resource, this.objectLocks); + } else { + lockDetails = lock.releaseReadLocks(resource, this.objectLocks); + } + this.objectLocks.clear(); + } + + @VisibleForTesting + public List getObjectLocks() { + return objectLocks; + } + + public boolean isLockAcquired() { + return lockDetails.isLockAcquired(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java new file mode 100644 index 000000000000..741f1d30c36e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
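// Hedged usage sketch (not from the patch): wrapping the bulk lock API with
// MultiSnapshotLocks so a caller can lock a set of snapshots by UUID and release them
// together. The lock instance and the snapshot UUIDs are assumed names; acquireLock()
// refuses a second acquisition while locks are held, so release before locking a new set.
MultiSnapshotLocks snapshotLocks =
    new MultiSnapshotLocks(ozoneManagerLock, OzoneManagerLock.Resource.SNAPSHOT_LOCK, true);
try {
  if (snapshotLocks.acquireLock(Arrays.asList(snapshotIdOne, snapshotIdTwo)).isLockAcquired()) {
    // Work that must see both snapshots in a stable, write-locked state goes here.
  }
} finally {
  snapshotLocks.releaseLock();
}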
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyCollection; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentMatchers; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +/** + * Class to test class MultiLocks. 
+ */ +@ExtendWith(MockitoExtension.class) +public class TestMultiSnapshotLocks { + @Mock + private IOzoneManagerLock mockLock; + + @Mock + private OzoneManagerLock.Resource mockResource; + + private MultiSnapshotLocks multiSnapshotLocks; + private UUID obj1 = UUID.randomUUID(); + private UUID obj2 = UUID.randomUUID(); + + @BeforeEach + void setUp() { + // Initialize MultiLocks with mock dependencies + multiSnapshotLocks = new MultiSnapshotLocks(mockLock, mockResource, true); + } + + @Test + void testAcquireLockSuccess() throws Exception { + List objects = Arrays.asList(obj1, obj2); + OMLockDetails mockLockDetails = mock(OMLockDetails.class); + when(mockLockDetails.isLockAcquired()).thenReturn(true); + + // Simulate successful lock acquisition for each object + when(mockLock.acquireWriteLocks(eq(mockResource), anyList())).thenReturn(mockLockDetails); + + OMLockDetails result = multiSnapshotLocks.acquireLock(objects); + + assertEquals(mockLockDetails, result); + verify(mockLock, times(1)).acquireWriteLocks(ArgumentMatchers.eq(mockResource), any()); + } + + @Test + void testAcquireLockFailureReleasesAll() throws Exception { + + List objects = Arrays.asList(obj1, obj2); + OMLockDetails failedLockDetails = mock(OMLockDetails.class); + when(failedLockDetails.isLockAcquired()).thenReturn(false); + + // Simulate failure during lock acquisition + when(mockLock.acquireWriteLocks(eq(mockResource), anyCollection())).thenReturn(failedLockDetails); + + OMLockDetails result = multiSnapshotLocks.acquireLock(objects); + + assertEquals(failedLockDetails, result); + assertTrue(multiSnapshotLocks.getObjectLocks().isEmpty()); + } + + @Test + void testReleaseLock() throws Exception { + List objects = Arrays.asList(obj1, obj2); + OMLockDetails mockLockDetails = mock(OMLockDetails.class); + when(mockLockDetails.isLockAcquired()).thenReturn(true); + + // Acquire locks first + when(mockLock.acquireWriteLocks(eq(mockResource), anyCollection())).thenReturn(mockLockDetails); + multiSnapshotLocks.acquireLock(objects); + assertFalse(multiSnapshotLocks.getObjectLocks().isEmpty()); + + // Now release locks + multiSnapshotLocks.releaseLock(); + + // Verify that locks are released in order + verify(mockLock).releaseWriteLocks(eq(mockResource), any()); + assertTrue(multiSnapshotLocks.getObjectLocks().isEmpty()); + } + + @Test + void testAcquireLockWhenAlreadyAcquiredThrowsException() throws Exception { + List objects = Collections.singletonList(obj1); + OMLockDetails mockLockDetails = mock(OMLockDetails.class); + when(mockLockDetails.isLockAcquired()).thenReturn(true); + + // Acquire a lock first + when(mockLock.acquireWriteLocks(any(), anyList())).thenReturn(mockLockDetails); + multiSnapshotLocks.acquireLock(objects); + + // Try acquiring locks again without releasing + OMException exception = assertThrows(OMException.class, () -> multiSnapshotLocks.acquireLock(objects)); + + assertEquals( + String.format("More locks cannot be acquired when locks have been already acquired. Locks acquired : [[%s]]", + obj1.toString()), exception.getMessage()); + } +} From bd5b0c6870c29804f3527e2b2026c6b4c3a671bb Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 11 Mar 2025 15:47:46 -0700 Subject: [PATCH 02/24] HDDS-12560. 
Reclaimable Filter for Snaphost Garbage Collections Change-Id: Ia8cd0c367e54c978d7534e7bd3702803c8cdf38e --- .../hadoop/ozone/util/CheckedFunction.java | 31 ++ .../ozone/om/lock/OzoneManagerLock.java | 3 +- .../om/snapshot/filter/ReclaimableFilter.java | 235 +++++++++++++ .../om/snapshot/filter/package-info.java | 21 ++ .../filter/TestAbstractReclaimableFilter.java | 320 ++++++++++++++++++ .../filter/TestReclaimableFilter.java | 288 ++++++++++++++++ 6 files changed, 897 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java new file mode 100644 index 000000000000..0d565cbf3f7a --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +/** + * + * Represents a function that accepts one argument and produces a result. + * This is a functional interface whose functional method is apply(Object). + * Type parameters: + * – the type of the input to the function + * – the type of the result of the function + * - the type of exception thrown. 
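// Minimal illustration (an assumption, not code from this patch): how the checked
// functional interface is meant to be used. The type parameters are taken to be
// <T, R, E extends Exception>, matching the apply(T) -> R throws E contract below.
CheckedFunction<String, Integer, IOException> toLength = s -> {
  if (s == null) {
    throw new IOException("null input");
  }
  return s.length();
};
int n = toLength.apply("ozone");   // n == 5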
+ */ +public interface CheckedFunction { + R apply(T t) throws E; +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index c91619379ea6..4efdd22c0f26 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -571,7 +571,8 @@ public enum Resource { S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 - SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 + SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"), // = 255 + SNAPSHOT_GC_LOCK((byte) 8, "SNAPSHOT_GC_LOCK"); // level of the resource private byte lockLevel; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java new file mode 100644 index 000000000000..dcbc2f44e6e8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.hadoop.ozone.util.CheckedFunction; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class is responsible for opening last N snapshot given a snapshot metadata manager or AOS metadata manager by + * acquiring a lock. 
+ */ +public abstract class ReclaimableFilter implements CheckedFunction, + Boolean, IOException>, Closeable { + + private static final Logger LOG = LoggerFactory.getLogger(ReclaimableFilter.class); + + private final OzoneManager ozoneManager; + private final SnapshotInfo currentSnapshotInfo; + private final OmSnapshotManager omSnapshotManager; + private final SnapshotChainManager snapshotChainManager; + + private final List previousSnapshotInfos; + private final List> previousOmSnapshots; + private final MultiSnapshotLocks snapshotIdLocks; + private Long volumeId; + private OmBucketInfo bucketInfo; + private final KeyManager keyManager; + private final int numberOfPreviousSnapshotsFromChain; + + /** + * Filter to return deleted keys/directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + * + * @param currentSnapshotInfo : If null the deleted keys in AOS needs to be processed, hence the latest snapshot + * in the snapshot chain corresponding to bucket key needs to be processed. + * @param keyManager : KeyManager corresponding to snapshot or AOS. + * @param lock : Lock for Active OM. + */ + public ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, + SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, + IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + this.ozoneManager = ozoneManager; + this.omSnapshotManager = omSnapshotManager; + this.currentSnapshotInfo = currentSnapshotInfo; + this.snapshotChainManager = snapshotChainManager; + this.snapshotIdLocks = new MultiSnapshotLocks(lock, OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK, false); + this.keyManager = keyManager; + this.numberOfPreviousSnapshotsFromChain = numberOfPreviousSnapshotsFromChain; + this.previousOmSnapshots = new ArrayList<>(numberOfPreviousSnapshotsFromChain); + this.previousSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); + } + + private List getLastNSnapshotInChain(String volume, String bucket) throws IOException { + if (currentSnapshotInfo != null && + (!currentSnapshotInfo.getVolumeName().equals(volume) || !currentSnapshotInfo.getBucketName().equals(bucket))) { + throw new IOException("Volume & Bucket name for snapshot : " + currentSnapshotInfo + " not matching for " + + "key in volume: " + volume + " bucket: " + bucket); + } + SnapshotInfo expectedPreviousSnapshotInfo = currentSnapshotInfo == null + ? SnapshotUtils.getLatestSnapshotInfo(volume, bucket, ozoneManager, snapshotChainManager) + : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, currentSnapshotInfo); + List snapshotInfos = Lists.newArrayList(); + SnapshotInfo snapshotInfo = expectedPreviousSnapshotInfo; + while (snapshotInfos.size() < numberOfPreviousSnapshotsFromChain) { + // If changes made to the snapshot have not been flushed to disk, throw exception immediately, next run of + // garbage collection would process the snapshot. + if (!OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), snapshotInfo)) { + throw new IOException("Changes made to the snapshot " + snapshotInfo + " have not been flushed to the disk "); + } + snapshotInfos.add(snapshotInfo); + snapshotInfo = snapshotInfo == null ? null + : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); + } + + // Reversing list to get the correct order in chain. To ensure locking order is as per the chain ordering. 
+ Collections.reverse(snapshotInfos); + return snapshotInfos; + } + + private boolean validateExistingLastNSnapshotsInChain(String volume, String bucket) throws IOException { + List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); + List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() + .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) + .collect(Collectors.toList()); + List existingSnapshotIds = previousOmSnapshots.stream() + .map(omSnapshotReferenceCounted -> omSnapshotReferenceCounted == null ? null : + omSnapshotReferenceCounted.get().getSnapshotID()).collect(Collectors.toList()); + return expectedSnapshotIds.equals(existingSnapshotIds); + } + + // Initialize the last N snapshots in the chain by acquiring locks. Throw IOException if it fails. + private void initializePreviousSnapshotsFromChain(String volume, String bucket) throws IOException { + if (validateExistingLastNSnapshotsInChain(volume, bucket) && snapshotIdLocks.isLockAcquired()) { + return; + } + // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. + close(); + try { + // Acquire lock on last N snapshot & current snapshot(AOS if it is null). + List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); + List lockIds = expectedLastNSnapshotsInChain.stream() + .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) + .collect(Collectors.toList()); + //currentSnapshotInfo for AOS will be null. + lockIds.add(currentSnapshotInfo == null ? null : currentSnapshotInfo.getSnapshotId()); + + if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { + for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { + if (snapshotInfo != null) { + // Fail operation if any of the previous snapshots are not active. + previousOmSnapshots.add(omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), snapshotInfo.getName())); + previousSnapshotInfos.add(snapshotInfo); + } else { + previousOmSnapshots.add(null); + previousSnapshotInfos.add(null); + } + + // TODO: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames + // support. + bucketInfo = ozoneManager.getBucketInfo(volume, bucket); + volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); + } + } else { + throw new IOException("Lock acquisition failed for last N snapshots : " + + expectedLastNSnapshotsInChain + " " + currentSnapshotInfo); + } + } catch (IOException e) { + this.close(); + throw e; + } + } + + @Override + public synchronized Boolean apply(Table.KeyValue keyValue) throws IOException { + String volume = getVolumeName(keyValue); + String bucket = getBucketName(keyValue); + initializePreviousSnapshotsFromChain(volume, bucket); + boolean isReclaimable = isReclaimable(keyValue); + // This is to ensure the reclamation ran on the same previous snapshot and no change occurred in the chain + // while processing the entry. 
+ return isReclaimable && validateExistingLastNSnapshotsInChain(volume, bucket); + } + + protected abstract String getVolumeName(Table.KeyValue keyValue) throws IOException; + + protected abstract String getBucketName(Table.KeyValue keyValue) throws IOException; + + protected abstract Boolean isReclaimable(Table.KeyValue keyValue) throws IOException; + + @Override + public void close() throws IOException { + this.snapshotIdLocks.releaseLock(); + for (ReferenceCounted previousOmSnapshot : previousOmSnapshots) { + IOUtils.close(LOG, previousOmSnapshot); + } + previousOmSnapshots.clear(); + previousSnapshotInfos.clear(); + } + + protected ReferenceCounted getPreviousOmSnapshot(int index) { + return previousOmSnapshots.get(index); + } + + protected KeyManager getKeyManager() { + return keyManager; + } + + protected Long getVolumeId() { + return volumeId; + } + + protected OmBucketInfo getBucketInfo() { + return bucketInfo; + } + + protected SnapshotInfo getPreviousSnapshotInfo(int index) { + return previousSnapshotInfos.get(index); + } + + protected OzoneManager getOzoneManager() { + return ozoneManager; + } + + @VisibleForTesting + List getPreviousSnapshotInfos() { + return previousSnapshotInfos; + } + + @VisibleForTesting + List> getPreviousOmSnapshots() { + return previousOmSnapshots; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java new file mode 100644 index 000000000000..16cdda0b6548 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package containing filter to perform reclaimable check on snapshots. + */ +package org.apache.hadoop.ozone.om.snapshot.filter; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java new file mode 100644 index 000000000000..0a65b5919b9d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java @@ -0,0 +1,320 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
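// Hedged sketch of a concrete filter, closely mirroring the anonymous subclass used in
// TestReclaimableFilter further down: the table key is assumed to be a "volume/bucket"
// string and the value a Boolean that directly encodes reclaimability. The generic type
// argument and the injected collaborators (ozoneManager, omSnapshotManager, chain manager,
// keyManager, ozoneManagerLock) are assumptions for illustration; production filters would
// parse real deleted-key or deleted-directory table entries instead.
ReclaimableFilter<Boolean> filter = new ReclaimableFilter<Boolean>(ozoneManager, omSnapshotManager,
    snapshotChainManager, null /* null => filter entries on the active object store */, keyManager,
    ozoneManagerLock, 1) {
  @Override
  protected String getVolumeName(Table.KeyValue<String, Boolean> keyValue) throws IOException {
    return keyValue.getKey().split("/")[0];
  }

  @Override
  protected String getBucketName(Table.KeyValue<String, Boolean> keyValue) throws IOException {
    return keyValue.getKey().split("/")[1];
  }

  @Override
  protected Boolean isReclaimable(Table.KeyValue<String, Boolean> keyValue) throws IOException {
    return keyValue.getValue();
  }
};
// apply() locks the previous snapshot(s) in the chain, runs isReclaimable, then re-validates
// that the chain did not change while the entry was being processed.
boolean reclaimable = filter.apply(Table.newKeyValue("vol1/buck1", true));
filter.close();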
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; +import static org.mockito.Mockito.CALLS_REAL_METHODS; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.DBOptions; +import org.rocksdb.ReadOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; + +/** + * Test class for ReclaimableFilter. 
+ */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestAbstractReclaimableFilter { + + private ReclaimableFilter reclaimableFilter; + private OzoneManager ozoneManager; + private OmSnapshotManager omSnapshotManager; + private AtomicReference> lockIds = new AtomicReference<>(Collections.emptyList()); + private List volumes; + private List buckets; + private MockedStatic mockedSnapshotUtils; + private Map> snapshotInfos; + @TempDir + private Path testDir; + private SnapshotChainManager snapshotChainManager; + private KeyManager keyManager; + + protected abstract ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, + IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain); + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume) throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info, BucketLayout.FILE_SYSTEM_OPTIMIZED); + } + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, BucketLayout bucketLayout) + throws RocksDBException, IOException { + return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, + numberOfBucketsPerVolume, (info) -> info, bucketLayout); + } + + protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, + int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, Function snapshotProps, + BucketLayout bucketLayout) throws IOException, RocksDBException { + this.ozoneManager = mock(OzoneManager.class); + this.snapshotChainManager = mock(SnapshotChainManager.class); + this.keyManager = mock(KeyManager.class); + IOzoneManagerLock ozoneManagerLock = mock(IOzoneManagerLock.class); + when(ozoneManagerLock.acquireReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + lockIds.set( + (List) i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + }); + when(ozoneManagerLock.releaseReadLocks(eq(OzoneManagerLock.Resource.SNAPSHOT_GC_LOCK), anyList())) + .thenAnswer(i -> { + Assertions.assertEquals(lockIds.get(), + i.getArgument(1, List.class).stream().map(val -> UUID.fromString(((String[]) val)[0])) + .collect(Collectors.toList())); + lockIds.set(Collections.emptyList()); + return OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; + }); + snapshotInfos = mockSnapshotChain(actualTotalNumberOfSnapshotsInChain, + ozoneManager, snapshotChainManager, numberOfVolumes, numberOfBucketsPerVolume, snapshotProps); + mockOzoneManager(bucketLayout); + mockOmSnapshotManager(ozoneManager); + SnapshotInfo info = index >= actualTotalNumberOfSnapshotsInChain ? 
null : + snapshotInfos.get(getKey(volumes.get(volumes.size() - 1), buckets.get(buckets.size() - 1))).get(index); + this.reclaimableFilter = Mockito.spy(initializeFilter(ozoneManager, omSnapshotManager, snapshotChainManager, + info, keyManager, ozoneManagerLock, numberOfPreviousSnapshotsFromChain)); + return info; + } + + @AfterEach + protected void teardown() throws IOException { + this.mockedSnapshotUtils.close(); + this.reclaimableFilter.close(); + } + + private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); + long volumeCount = 0; + long bucketCount = 0; + for (String volume : volumes) { + when(metadataManager.getVolumeId(eq(volume))).thenReturn(volumeCount); + for (String bucket : buckets) { + when(ozoneManager.getBucketInfo(eq(volume), eq(bucket))) + .thenReturn(OmBucketInfo.newBuilder().setVolumeName(volume).setBucketName(bucket) + .setObjectID(bucketCount).setBucketLayout(bucketLayout).build()); + bucketCount++; + } + volumeCount++; + } + } + + private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { + try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); + MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, + (mock, context) -> { + Map> map = new HashMap<>(); + when(mock.get(any(UUID.class))).thenAnswer(i -> { + if (snapshotInfos.values().stream().flatMap(List::stream) + .map(SnapshotInfo::getSnapshotId) + .noneMatch(id -> id.equals(i.getArgument(0, UUID.class)))) { + throw new IOException("Snapshot " + i.getArgument(0, UUID.class) + " not found"); + } + return map.computeIfAbsent(i.getArgument(0, UUID.class), (k) -> { + ReferenceCounted ref = mock(ReferenceCounted.class); + OmSnapshot omSnapshot = mock(OmSnapshot.class); + when(omSnapshot.getSnapshotID()).thenReturn(k); + when(ref.get()).thenReturn(omSnapshot); + return ref; + }); + }); + })) { + ManagedRocksDB managedRocksDB = mock(ManagedRocksDB.class); + RocksDB rocksDB = mock(RocksDB.class); + rocksdb.when(() -> ManagedRocksDB.open(any(DBOptions.class), anyString(), anyList(), anyList())) + .thenReturn(managedRocksDB); + RocksIterator emptyRocksIterator = mock(RocksIterator.class); + when(emptyRocksIterator.isValid()).thenReturn(false); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class), any(ReadOptions.class))).thenReturn(emptyRocksIterator); + when(rocksDB.newIterator(any(ColumnFamilyHandle.class))).thenReturn(emptyRocksIterator); + OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); + DBStore dbStore = mock(RDBStore.class); + when(metadataManager.getStore()).thenReturn(dbStore); + when(dbStore.getRocksDBCheckpointDiffer()).thenReturn(Mockito.mock(RocksDBCheckpointDiffer.class)); + Table mockedTransactionTable = Mockito.mock(Table.class); + when(metadataManager.getTransactionInfoTable()).thenReturn(mockedTransactionTable); + when(mockedTransactionTable.getSkipCache(eq(TRANSACTION_INFO_KEY))) + .thenReturn(TransactionInfo.valueOf(0, 10)); + when(managedRocksDB.get()).thenReturn(rocksDB); + + when(rocksDB.createColumnFamily(any(ColumnFamilyDescriptor.class))) + .thenAnswer(i -> { + ColumnFamilyDescriptor descriptor = i.getArgument(0, ColumnFamilyDescriptor.class); + ColumnFamilyHandle ch = Mockito.mock(ColumnFamilyHandle.class); + when(ch.getName()).thenReturn(descriptor.getName()); + return ch; + }); + OzoneConfiguration conf = new OzoneConfiguration(); + 
conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); + when(om.getConfiguration()).thenReturn(conf); + when(om.isFilesystemSnapshotEnabled()).thenReturn(true); + this.omSnapshotManager = new OmSnapshotManager(om); + } + } + + protected List getLastSnapshotInfos(String volume, String bucket, int numberOfSnapshotsInChain, + int index) { + List infos = getSnapshotInfos().get(getKey(volume, bucket)); + int endIndex = Math.min(index - 1, infos.size() - 1); + return IntStream.range(endIndex - numberOfSnapshotsInChain + 1, endIndex + 1).mapToObj(i -> i >= 0 ? + infos.get(i) : null).collect(Collectors.toList()); + } + + private Map> mockSnapshotChain( + int numberOfSnaphotsInChain, OzoneManager om, SnapshotChainManager chainManager, int numberOfVolumes, + int numberOfBuckets, Function snapshotInfoProp) { + volumes = IntStream.range(0, numberOfVolumes).mapToObj(i -> "volume" + i).collect(Collectors.toList()); + buckets = IntStream.range(0, numberOfBuckets).mapToObj(i -> "bucket" + i).collect(Collectors.toList()); + Map> bucketSnapshotMap = new HashMap<>(); + for (String volume : volumes) { + for (String bucket : buckets) { + bucketSnapshotMap.computeIfAbsent(getKey(volume, bucket), (k) -> new ArrayList<>()); + } + } + mockedSnapshotUtils = mockStatic(SnapshotUtils.class, CALLS_REAL_METHODS); + for (int i = 0; i < numberOfSnaphotsInChain; i++) { + for (String volume : volumes) { + for (String bucket : buckets) { + SnapshotInfo snapshotInfo = snapshotInfoProp.apply(SnapshotInfo.newInstance(volume, bucket, + "snap" + i, UUID.randomUUID(), 0)); + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + mockedSnapshotUtils.when(() -> SnapshotUtils.getSnapshotInfo(eq(ozoneManager), + eq(snapshotInfo.getTableKey()))).thenReturn(snapshotInfo); + mockedSnapshotUtils.when(() -> SnapshotUtils.getPreviousSnapshot(eq(om), eq(chainManager), + eq(snapshotInfo))).thenReturn(infos.isEmpty() ? null : infos.get(infos.size() - 1)); + infos.add(snapshotInfo); + } + } + } + + for (String volume : volumes) { + for (String bucket : buckets) { + mockedSnapshotUtils.when(() -> SnapshotUtils.getLatestSnapshotInfo( + eq(volume), eq(bucket), eq(om), eq(chainManager))) + .thenAnswer(i -> { + List infos = bucketSnapshotMap.get(getKey(volume, bucket)); + return infos.isEmpty() ? 
null : infos.get(infos.size() - 1); + }); + } + } + return bucketSnapshotMap; + } + + public static String getKey(String volume, String bucket) { + return volume + "/" + bucket; + } + + public Map> getSnapshotInfos() { + return snapshotInfos; + } + + public SnapshotChainManager getSnapshotChainManager() { + return snapshotChainManager; + } + + public ReclaimableFilter getReclaimableFilter() { + return reclaimableFilter; + } + + public AtomicReference> getLockIds() { + return lockIds; + } + + public List getBuckets() { + return buckets; + } + + public List getVolumes() { + return volumes; + } + + public OzoneManager getOzoneManager() { + return ozoneManager; + } + + public MockedStatic getMockedSnapshotUtils() { + return mockedSnapshotUtils; + } + + public OmSnapshotManager getOmSnapshotManager() { + return omSnapshotManager; + } + + public KeyManager getKeyManager() { + return keyManager; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java new file mode 100644 index 000000000000..e4c586670ca0 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableFilter testing general initializing of snapshot chain. + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class TestReclaimableFilter extends TestAbstractReclaimableFilter { + + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, + IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableFilter(om, snapshotManager, chainManager, currentSnapshotInfo, + km, lock, numberOfPreviousSnapshotsFromChain) { + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return keyValue.getKey().split("/")[0]; + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return keyValue.getKey().split("/")[1]; + } + + @Override + protected Boolean isReclaimable(Table.KeyValue keyValue) throws IOException { + return keyValue == null || keyValue.getValue(); + } + }; + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + for (int k = 0; k < 5; k++) { + arguments.add(Arguments.of(i, j, k)); + } + } + } + return arguments; + } + + private void testSnapshotInitAndLocking(String volume, String bucket, int numberOfPreviousSnapshotsFromChain, + int index, SnapshotInfo currentSnapshotInfo, Boolean reclaimable, + Boolean expectedReturnValue) throws IOException { + List infos = getLastSnapshotInfos(volume, bucket, numberOfPreviousSnapshotsFromChain, index); + assertEquals(expectedReturnValue, + getReclaimableFilter().apply(Table.newKeyValue(getKey(volume, bucket), reclaimable))); + Assertions.assertEquals(infos, getReclaimableFilter().getPreviousSnapshotInfos()); + Assertions.assertEquals(infos.size(), getReclaimableFilter().getPreviousOmSnapshots().size()); + Assertions.assertEquals(infos.stream().map(si -> si == null ? 
null : si.getSnapshotId()) + .collect(Collectors.toList()), getReclaimableFilter().getPreviousOmSnapshots().stream() + .map(i -> i == null ? null : ((ReferenceCounted) i).get().getSnapshotID()) + .collect(Collectors.toList())); + infos.add(currentSnapshotInfo); + Assertions.assertEquals(infos.stream().filter(Objects::nonNull).map(SnapshotInfo::getSnapshotId).collect( + Collectors.toList()), getLockIds().get()); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableFilterSnapshotChainInitilization(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = + setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + testSnapshotInitAndLocking(volume, bucket, numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, + true); + testSnapshotInitAndLocking(volume, bucket, numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, false, + false); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = + setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); + AtomicReference volume = new AtomicReference<>(getVolumes().get(2)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(3)); + if (currentSnapshotInfo == null) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + null, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + null, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + assertEquals("Volume & Bucket name for snapshot : " + + currentSnapshotInfo + " not matching for key in volume: " + volume + + " bucket: " + bucket, ex.getMessage()); + } + volume.set(getVolumes().get(3)); + bucket.set(getBuckets().get(2)); + if (currentSnapshotInfo == null) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + null, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + null, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + assertEquals("Volume & Bucket name for snapshot : " + + currentSnapshotInfo + " not matching for key in volume: " + volume + + " bucket: " + bucket, ex.getMessage()); + } + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimabilityOnSnapshotAddition(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { + + SnapshotInfo currentSnapshotInfo = + setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); + AtomicReference volume = new AtomicReference<>(getVolumes().get(3)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(3)); + + 
when(getReclaimableFilter().isReclaimable(any(Table.KeyValue.class))).thenAnswer(i -> { + if (i.getArgument(0) == null) { + return null; + } + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volume.get(), bucket.get(), + "snap" + actualNumberOfSnapshots, UUID.randomUUID(), 0); + SnapshotInfo prevSnapshot = SnapshotUtils.getLatestSnapshotInfo(volume.get(), bucket.get(), getOzoneManager(), + getSnapshotChainManager()); + getMockedSnapshotUtils().when( + () -> SnapshotUtils.getSnapshotInfo(eq(getOzoneManager()), eq(snapshotInfo.getTableKey()))) + .thenReturn(snapshotInfo); + getMockedSnapshotUtils().when( + () -> SnapshotUtils.getPreviousSnapshot(eq(getOzoneManager()), eq(getSnapshotChainManager()), + eq(snapshotInfo))).thenReturn(prevSnapshot); + getSnapshotInfos().get(getKey(volume.get(), bucket.get())).add(snapshotInfo); + return i.callRealMethod(); + }); + + if (currentSnapshotInfo == null) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + null, true, numberOfPreviousSnapshotsFromChain == 0); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index + 1, + null, false, false); + } else { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } + } + + List testInvalidSnapshotArgs() { + List arguments = testReclaimableFilterArguments(); + return arguments.stream().flatMap(args -> IntStream.range(0, (int) args.get()[1]) + .mapToObj(i -> Arguments.of(args.get()[0], args.get()[1], args.get()[2], i))) + .collect(Collectors.toList()); + } + + @ParameterizedTest + @MethodSource("testInvalidSnapshotArgs") + public void testInitWithInactiveSnapshots(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index, + int snapIndex) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, + 1, 1, (snapshotInfo) -> { + if (snapshotInfo.getVolumeName().equals(getVolumes().get(0)) && + snapshotInfo.getBucketName().equals(getBuckets().get(0)) + && snapshotInfo.getName().equals("snap" + snapIndex)) { + snapshotInfo.setSnapshotStatus(SnapshotInfo.SnapshotStatus.SNAPSHOT_DELETED); + } + return snapshotInfo; + }, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + AtomicReference volume = new AtomicReference<>(getVolumes().get(0)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(0)); + int endIndex = Math.min(index - 1, actualNumberOfSnapshots - 1); + int beginIndex = Math.max(0, endIndex - numberOfPreviousSnapshotsFromChain + 1); + if (snapIndex < beginIndex || snapIndex > endIndex) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + + assertEquals(String.format("Unable to load snapshot. 
Snapshot with table key '/%s/%s/%s' is no longer active", + volume.get(), bucket.get(), "snap" + snapIndex), ex.getMessage()); + } + } + + @ParameterizedTest + @MethodSource("testInvalidSnapshotArgs") + public void testInitWithUnflushedSnapshots(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index, + int snapIndex) throws IOException, RocksDBException { + SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, + 4, 4, (snapshotInfo) -> { + if (snapshotInfo.getVolumeName().equals(getVolumes().get(3)) && + snapshotInfo.getBucketName().equals(getBuckets().get(3)) + && snapshotInfo.getName().equals("snap" + snapIndex)) { + try { + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(0, 11).toByteString()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return snapshotInfo; + }, BucketLayout.FILE_SYSTEM_OPTIMIZED); + + AtomicReference volume = new AtomicReference<>(getVolumes().get(3)); + AtomicReference bucket = new AtomicReference<>(getBuckets().get(3)); + int endIndex = Math.min(index - 1, actualNumberOfSnapshots - 1); + int beginIndex = Math.max(0, endIndex - numberOfPreviousSnapshotsFromChain + 1); + if (snapIndex < beginIndex || snapIndex > endIndex) { + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true); + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, false, false); + } else { + IOException ex = assertThrows(IOException.class, () -> + testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, + currentSnapshotInfo, true, true)); + assertEquals(String.format("Changes made to the snapshot %s have not been flushed to the disk ", + getSnapshotInfos().get(getKey(volume.get(), bucket.get())).get(snapIndex)), ex.getMessage()); + } + } +} From 20183705b43a48f443a9512dc86c7f73b2187de2 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 11 Mar 2025 18:19:50 -0700 Subject: [PATCH 03/24] HDDS-12562. 
Reclaimable Directory entry filter for reclaiming deleted directory entries Change-Id: Ia14c21b8e26fe9af60ef55bcd05951ae8bf36516 --- .../hadoop/ozone/om/TestKeyManagerImpl.java | 97 ++++++++++++ .../apache/hadoop/ozone/om/KeyManager.java | 16 +- .../hadoop/ozone/om/KeyManagerImpl.java | 28 ++++ .../snapshot/filter/ReclaimableDirFilter.java | 77 ++++++++++ .../filter/TestReclaimableDirFilter.java | 142 ++++++++++++++++++ 5 files changed, 359 insertions(+), 1 deletion(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 4aef43f86412..e9ab4bf4a4b9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -53,6 +53,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import jakarta.annotation.Nonnull; import java.io.File; @@ -69,6 +70,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.tuple.Pair; @@ -104,6 +106,7 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -112,6 +115,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -1586,6 +1590,99 @@ void testGetNotExistedPart() throws IOException { assertEquals(0, locationList.size()); } + private Table getMockedTable(Map map) throws IOException { + Table table = mock(Table.class); + when(table.get(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + when(table.getIfExist(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); + return table; + } + + private OmKeyInfo getMockedOmKeyInfo(OmBucketInfo bucketInfo, long parentId, String key, long objectId) { + OmKeyInfo omKeyInfo = mock(OmKeyInfo.class); + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + when(omKeyInfo.getFileName()).thenReturn(key); + } else { + when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + when(omKeyInfo.getKeyName()).thenReturn(key); + } + when(omKeyInfo.getObjectID()).thenReturn(objectId); + return omKeyInfo; + } + + private OmDirectoryInfo getMockedOmDirInfo(long parentId, String key, long objectId) { + OmDirectoryInfo omKeyInfo = mock(OmDirectoryInfo.class); + 
when(omKeyInfo.getName()).thenReturn(key); + when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + when(omKeyInfo.getParentObjectID()).thenReturn(0L); + when(omKeyInfo.getObjectID()).thenReturn(objectId); + return omKeyInfo; + } + + private String getPath(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyInfo) { + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + return volumeId + "/" + bucketInfo.getObjectID() + "/" + omKeyInfo.getParentObjectID() + "/" + + omKeyInfo.getFileName(); + } else { + return bucketInfo.getVolumeName() + "/" + bucketInfo.getBucketName() + "/" + omKeyInfo.getKeyName(); + } + } + + private String getPath(long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo omDirInfo) { + return volumeId + "/" + bucketInfo.getObjectID() + "/" + omDirInfo.getParentObjectID() + "/" + + omDirInfo.getName(); + } + + private String getRenameKey(String volume, String bucket, long objectId) { + return volume + "/" + bucket + "/" + objectId; + } + + @Test + public void testPreviousSnapshotOzoneDirInfo() throws IOException { + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + when(omMetadataManager.getOzonePathKey(anyLong(), anyLong(), anyLong(), anyString())) + .thenAnswer(i -> Arrays.stream(i.getArguments()).map(Object::toString) + .collect(Collectors.joining("/"))); + when(omMetadataManager.getRenameKey(anyString(), anyString(), anyLong())).thenAnswer( + i -> getRenameKey(i.getArgument(0), i.getArgument(1), i.getArgument(2))); + + OMMetadataManager previousMetadataManager = mock(OMMetadataManager.class); + OzoneConfiguration configuration = new OzoneConfiguration(); + KeyManagerImpl km = new KeyManagerImpl(null, null, omMetadataManager, configuration, null, null, null); + KeyManagerImpl prevKM = new KeyManagerImpl(null, null, previousMetadataManager, configuration, null, null, null); + long volumeId = 1L; + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setBucketName(BUCKET_NAME).setVolumeName(VOLUME_NAME) + .setObjectID(2L).setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build(); + OmDirectoryInfo prevKey = getMockedOmDirInfo(5, "key", 1); + OmDirectoryInfo prevKey2 = getMockedOmDirInfo(7, "key2", 2); + OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); + OmDirectoryInfo currentKeyDir = getMockedOmDirInfo(6, "renamedKey", 1); + OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); + OmDirectoryInfo currentKeyDir2 = getMockedOmDirInfo(7, "key2", 2); + OmKeyInfo currentKey3 = getMockedOmKeyInfo(bucketInfo, 8, "key3", 3); + OmDirectoryInfo currentKeyDir3 = getMockedOmDirInfo(8, "key3", 3); + OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4); + OmDirectoryInfo currentKeyDir4 = getMockedOmDirInfo(8, "key4", 4); + Table prevDirTable = + getMockedTable(ImmutableMap.of( + getPath(volumeId, bucketInfo, prevKey), prevKey, + getPath(volumeId, bucketInfo, prevKey2), prevKey2)); + Table renameTable = getMockedTable( + ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), getPath(volumeId, bucketInfo, prevKey), + getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getPath(volumeId, bucketInfo, + getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); + when(previousMetadataManager.getDirectoryTable()).thenReturn(prevDirTable); + when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable); + assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, 
currentKey2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey4).apply(prevKM)); + + assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir).apply(prevKM)); + assertEquals(prevKey2, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir2).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir3).apply(prevKM)); + assertNull(km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKeyDir4).apply(prevKM)); + } + private void initKeyTableForMultipartTest(String keyName, String volume) throws IOException { List locationInfoGroups = new ArrayList<>(); List locationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index d25535b151d4..a53a90d74195 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.ListKeysResult; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; @@ -40,6 +42,7 @@ import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; +import org.apache.hadoop.ozone.util.CheckedFunction; /** * Handles key level commands. @@ -83,7 +86,6 @@ OmKeyInfo lookupKey(OmKeyArgs args, ResolvedBucket bucketLayout, OmKeyInfo getKeyInfo(OmKeyArgs args, ResolvedBucket buctket, String clientAddress) throws IOException; - /** * Returns a list of keys represented by {@link OmKeyInfo} * in the given bucket. @@ -134,6 +136,18 @@ List> getRenamesKeyEntries( String volume, String bucket, String startKey, int size) throws IOException; + /** + * Returns the previous snapshot's ozone directory info corresponding to the given object. + */ + CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo directoryInfo) throws IOException; + + /** + * Returns the previous snapshot's ozone directory info corresponding to the given object. + */ + CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo directoryInfo) throws IOException; + + /** + * Returns a list deleted entries from the deletedTable. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 4357542ff7b8..1140112312ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -161,6 +161,7 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; +import org.apache.hadoop.ozone.util.CheckedFunction; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; @@ -731,6 +732,33 @@ public List> getRenamesKeyEntries( } } + @Override + public CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo keyInfo) throws IOException { + String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), keyInfo.getName()); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (km) -> km.getMetadataManager().getDirectoryTable()); + } + + @Override + public CheckedFunction getPreviousSnapshotOzoneDirInfo( + long volumeId, OmBucketInfo bucketInfo, OmKeyInfo keyInfo) throws IOException { + String currentKeyPath = metadataManager.getOzonePathKey(volumeId, bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), keyInfo.getFileName()); + return getPreviousSnapshotOzonePathInfo(bucketInfo, keyInfo.getObjectID(), currentKeyPath, + (previousSnapshotKM) -> previousSnapshotKM.getMetadataManager().getDirectoryTable()); + } + + private CheckedFunction getPreviousSnapshotOzonePathInfo( + OmBucketInfo bucketInfo, long objectId, String currentKeyPath, + Function> table) throws IOException { + String renameKey = metadataManager.getRenameKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName(), objectId); + String renamedKey = metadataManager.getSnapshotRenamedTable().getIfExist(renameKey); + return (previousSnapshotKM) -> table.apply(previousSnapshotKM).get( + renamedKey != null ? renamedKey : currentKeyPath); + } + @Override public List>> getDeletedKeyEntries( String volume, String bucket, String startKey, int size) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java new file mode 100644 index 000000000000..1123c6a2df73 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; + +/** + * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + */ +public class ReclaimableDirFilter extends ReclaimableFilter { + + /** + * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * the snapshot chain. + */ + public ReclaimableDirFilter(OzoneManager ozoneManager, + OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, + SnapshotInfo currentSnapshotInfo, KeyManager keyManager, + IOzoneManagerLock lock) { + super(ozoneManager, omSnapshotManager, snapshotChainManager, currentSnapshotInfo, keyManager, lock, 1); + } + + @Override + protected String getVolumeName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getVolumeName(); + } + + @Override + protected String getBucketName(Table.KeyValue keyValue) throws IOException { + return keyValue.getValue().getBucketName(); + } + + @Override + protected Boolean isReclaimable(Table.KeyValue deletedDirInfo) throws IOException { + ReferenceCounted previousSnapshot = getPreviousOmSnapshot(0); + KeyManager prevKeyManager = previousSnapshot == null ? 
null : previousSnapshot.get().getKeyManager(); + return isDirReclaimable(getVolumeId(), getBucketInfo(), deletedDirInfo.getValue(), getKeyManager(), prevKeyManager); + } + + private boolean isDirReclaimable(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo dirInfo, + KeyManager keyManager, KeyManager previousKeyManager) throws IOException { + if (previousKeyManager == null) { + return true; + } + OmDirectoryInfo prevDirectoryInfo = + keyManager.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, dirInfo).apply(previousKeyManager); + return prevDirectoryInfo == null || prevDirectoryInfo.getObjectID() != dirInfo.getObjectID(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java new file mode 100644 index 000000000000..590d96e165db --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot.filter; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.rocksdb.RocksDBException; + +/** + * Test class for ReclaimableDirFilter. 
+ */ +public class TestReclaimableDirFilter extends TestAbstractReclaimableFilter { + @Override + protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, + SnapshotChainManager chainManager, SnapshotInfo currentSnapshotInfo, + KeyManager km, IOzoneManagerLock lock, + int numberOfPreviousSnapshotsFromChain) { + return new ReclaimableDirFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock); + } + + List testReclaimableFilterArguments() { + List arguments = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 5; j++) { + arguments.add(Arguments.of(i, j)); + } + } + return arguments; + } + + private void testReclaimableDirFilter(String volume, String bucket, int index, + OmKeyInfo dirInfo, OmDirectoryInfo prevDirInfo, + Boolean expectedValue) + throws IOException { + List snapshotInfos = getLastSnapshotInfos(volume, bucket, 1, index); + SnapshotInfo prevSnapshotInfo = snapshotInfos.get(0); + OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); + long volumeId = getOzoneManager().getMetadataManager().getVolumeId(volume); + KeyManager keyManager = getKeyManager(); + if (prevSnapshotInfo != null) { + ReferenceCounted prevSnap = Optional.ofNullable(prevSnapshotInfo) + .map(info -> { + try { + return getOmSnapshotManager().getActiveSnapshot(volume, bucket, info.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).orElse(null); + mockOmSnapshot(prevSnap); + when(keyManager.getPreviousSnapshotOzoneDirInfo(eq(volumeId), eq(bucketInfo), eq(dirInfo))) + .thenReturn((km) -> prevDirInfo); + } + + when(dirInfo.getVolumeName()).thenReturn(volume); + when(dirInfo.getBucketName()).thenReturn(bucket); + assertEquals(expectedValue, getReclaimableFilter().apply(Table.newKeyValue("key", dirInfo))); + } + + private OmKeyInfo getMockedOmKeyInfo(long objectId) { + OmKeyInfo keyInfo = mock(OmKeyInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + return keyInfo; + } + + private OmDirectoryInfo getMockedOmDirInfo(long objectId) { + OmDirectoryInfo keyInfo = mock(OmDirectoryInfo.class); + when(keyInfo.getObjectID()).thenReturn(objectId); + return keyInfo; + } + + private KeyManager mockOmSnapshot(ReferenceCounted snapshot) { + if (snapshot != null) { + OmSnapshot omSnapshot = snapshot.get(); + KeyManager keyManager = mock(KeyManager.class); + when(omSnapshot.getKeyManager()).thenReturn(keyManager); + return keyManager; + } + return null; + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testNonReclaimableDirectory(int actualNumberOfSnapshots, int index) throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo dirInfo = getMockedOmKeyInfo(1); + OmDirectoryInfo prevDirectoryInfo = index - 1 >= 0 ? 
getMockedOmDirInfo(1) : null; + testReclaimableDirFilter(volume, bucket, index, dirInfo, prevDirectoryInfo, prevDirectoryInfo == null); + } + + @ParameterizedTest + @MethodSource("testReclaimableFilterArguments") + public void testReclaimableKeyWithDifferentObjId(int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { + setup(1, actualNumberOfSnapshots, index, 4, 2); + String volume = getVolumes().get(3); + String bucket = getBuckets().get(1); + index = Math.min(index, actualNumberOfSnapshots); + OmKeyInfo dirInfo = getMockedOmKeyInfo(1); + OmDirectoryInfo prevDirectoryInfo = index - 1 >= 0 ? getMockedOmDirInfo(2) : null; + testReclaimableDirFilter(volume, bucket, index, dirInfo, prevDirectoryInfo, true); + } +} From f1c85fd6768eebf51080eccda17c451b839707b6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 12 Mar 2025 05:51:00 -0700 Subject: [PATCH 04/24] HDDS-12560. Mock SnapshotDiffManager construction Change-Id: I772783d23bf250ca6d468bf6326a09a49c614eb9 --- .../om/snapshot/filter/TestAbstractReclaimableFilter.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java index 0a65b5919b9d..783af6151e8a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; @@ -60,6 +61,7 @@ import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.junit.jupiter.api.AfterEach; @@ -175,6 +177,9 @@ private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); + MockedConstruction mockedSnapshotDiffManager = + Mockito.mockConstruction(SnapshotDiffManager.class, (mock, context) -> + doNothing().when(mock).close()); MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, (mock, context) -> { Map> map = new HashMap<>(); From 43ab7b744e73e4b9bd91e91b3c8f6acaa6d16b48 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 12 Mar 2025 11:36:19 -0700 Subject: [PATCH 05/24] HDDS-12559. 
Revert unintended change in method signature Change-Id: I1c1ad2f536ead10a92caa7eec0b8d891289a0257 --- .../java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index c91619379ea6..477f271d79ae 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -253,7 +253,7 @@ private OMLockDetails acquireLocks(Resource resource, boolean isReadLock, } private OMLockDetails acquireLock(Resource resource, boolean isReadLock, - String[] keys) { + String... keys) { omLockDetails.get().clear(); if (!resource.canLock(lockSet.get())) { String errorMessage = getErrorMessage(resource); From b90116604943522540514c1e997ca23786ff6594 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 31 Mar 2025 18:10:34 -0700 Subject: [PATCH 06/24] HDDS-12559. Address review comments Change-Id: I5b4eedefd85881933572df1a6c8b2960a2cc9278 --- .../hadoop/ozone/om/lock/OzoneManagerLock.java | 16 ++++++++++++---- .../ozone/om/snapshot/MultiSnapshotLocks.java | 14 ++++++-------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 477f271d79ae..35047239ce98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -37,6 +37,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.utils.CompositeKey; import org.apache.hadoop.hdds.utils.SimpleStriped; @@ -128,8 +129,13 @@ private Striped createStripeLock(Resource r, private Iterable bulkGetLock(Resource resource, Collection keys) { Striped striped = stripedLockByResource.get(resource); - return striped.bulkGet(keys.stream().filter(Objects::nonNull) - .map(CompositeKey::combineKeys).collect(Collectors.toList())); + List lockKeys = new ArrayList<>(keys.size()); + for (String[] key : keys) { + if (Objects.nonNull(key)) { + lockKeys.add(CompositeKey.combineKeys(key)); + } + } + return striped.bulkGet(lockKeys); } private ReentrantReadWriteLock getLock(Resource resource, String... keys) { @@ -448,8 +454,10 @@ private OMLockDetails releaseLock(Resource resource, boolean isReadLock, private OMLockDetails releaseLocks(Resource resource, boolean isReadLock, Collection keys) { omLockDetails.get().clear(); - Iterable locks = bulkGetLock(resource, keys); - + List locks = + StreamSupport.stream(bulkGetLock(resource, keys).spliterator(), false).collect(Collectors.toList()); + // Release locks in reverse order. 
+ Collections.reverse(locks); for (ReadWriteLock lock : locks) { if (isReadLock) { lock.readLock().unlock(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index 9b8b0db2a698..3e141c0e9ff4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om.snapshot; -import com.google.common.annotations.VisibleForTesting; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -48,10 +47,10 @@ public MultiSnapshotLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource reso this.lockDetails = OMLockDetails.EMPTY_DETAILS_LOCK_NOT_ACQUIRED; } - public OMLockDetails acquireLock(Collection ids) throws OMException { - if (!objectLocks.isEmpty()) { - throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : " - + objectLocks.stream().map(Arrays::toString).collect(Collectors.toList()), + public synchronized OMLockDetails acquireLock(Collection ids) throws OMException { + if (this.lockDetails.isLockAcquired()) { + throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : [" + + objectLocks.stream().map(Arrays::toString).collect(Collectors.joining(",")) + "]", OMException.ResultCodes.INTERNAL_ERROR); } List keys = @@ -66,7 +65,7 @@ public OMLockDetails acquireLock(Collection ids) throws OMException { return omLockDetails; } - public void releaseLock() { + public synchronized void releaseLock() { if (this.writeLock) { lockDetails = lock.releaseWriteLocks(resource, this.objectLocks); } else { @@ -75,8 +74,7 @@ public void releaseLock() { this.objectLocks.clear(); } - @VisibleForTesting - public List getObjectLocks() { + List getObjectLocks() { return objectLocks; } From 865f3a5097a0f0706cf1050682854d30b633aa0b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 1 Apr 2025 01:04:37 -0700 Subject: [PATCH 07/24] HDDS-12559. Add javadoc Change-Id: I411b2d0a6f3fa0551ec2f306317440f8ed7387c0 --- .../org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 35047239ce98..2f3e2c939937 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -403,6 +403,14 @@ public OMLockDetails releaseWriteLock(Resource resource, String... keys) { return releaseLock(resource, false, keys); } + /** + * Release write lock on multiple resources. + * @param resource - Type of the resource. + * @param keys - List of resource names on which the user wants to release the lock. + * For Resource type BUCKET_LOCK, first param should be volume, second param + * should be bucket name. For all remaining resources only one param should + * be passed. 
+ */ @Override public OMLockDetails releaseWriteLocks(Resource resource, Collection keys) { return releaseLocks(resource, false, keys); From 5743edbef6e8b8a7776f52f7d6d7fa90928d8a80 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 1 Apr 2025 11:57:12 -0700 Subject: [PATCH 08/24] HDDS-12560. Address review comments Change-Id: Ie7575e9b7ccb1eb9c2b6d875e9d88e4005d12026 --- .../om/snapshot/filter/ReclaimableFilter.java | 26 +++++++------------ .../om/snapshot/TestMultiSnapshotLocks.java | 2 +- .../filter/TestReclaimableFilter.java | 12 ++++----- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index dcbc2f44e6e8..9bebfb6a6958 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.om.snapshot.filter; -import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import java.io.Closeable; import java.io.IOException; @@ -94,8 +93,8 @@ public ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshot private List getLastNSnapshotInChain(String volume, String bucket) throws IOException { if (currentSnapshotInfo != null && (!currentSnapshotInfo.getVolumeName().equals(volume) || !currentSnapshotInfo.getBucketName().equals(bucket))) { - throw new IOException("Volume & Bucket name for snapshot : " + currentSnapshotInfo + " not matching for " + - "key in volume: " + volume + " bucket: " + bucket); + throw new IOException("Volume and Bucket name for snapshot : " + currentSnapshotInfo + " do not match " + + "against the volume: " + volume + " and bucket: " + bucket + " of the key."); } SnapshotInfo expectedPreviousSnapshotInfo = currentSnapshotInfo == null ? SnapshotUtils.getLatestSnapshotInfo(volume, bucket, ozoneManager, snapshotChainManager) @@ -131,10 +130,6 @@ private boolean validateExistingLastNSnapshotsInChain(String volume, String buck // Initialize the last N snapshots in the chain by acquiring locks. Throw IOException if it fails. private void initializePreviousSnapshotsFromChain(String volume, String bucket) throws IOException { - if (validateExistingLastNSnapshotsInChain(volume, bucket) && snapshotIdLocks.isLockAcquired()) { - return; - } - // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. close(); try { // Acquire lock on last N snapshot & current snapshot(AOS if it is null). @@ -157,14 +152,14 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) previousSnapshotInfos.add(null); } - // TODO: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames + // NOTE: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames // support. 
bucketInfo = ozoneManager.getBucketInfo(volume, bucket); volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); } } else { - throw new IOException("Lock acquisition failed for last N snapshots : " + - expectedLastNSnapshotsInChain + " " + currentSnapshotInfo); + throw new IOException("Lock acquisition failed for last N snapshots: " + + expectedLastNSnapshotsInChain + ", " + currentSnapshotInfo); } } catch (IOException e) { this.close(); @@ -176,7 +171,10 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) public synchronized Boolean apply(Table.KeyValue keyValue) throws IOException { String volume = getVolumeName(keyValue); String bucket = getBucketName(keyValue); - initializePreviousSnapshotsFromChain(volume, bucket); + // If existing snapshotIds don't match then close all snapshots and reopen the previous N snapshots. + if (!validateExistingLastNSnapshotsInChain(volume, bucket) || !snapshotIdLocks.isLockAcquired()) { + initializePreviousSnapshotsFromChain(volume, bucket); + } boolean isReclaimable = isReclaimable(keyValue); // This is to ensure the reclamation ran on the same previous snapshot and no change occurred in the chain // while processing the entry. @@ -192,9 +190,7 @@ public synchronized Boolean apply(Table.KeyValue keyValue) throws IOE @Override public void close() throws IOException { this.snapshotIdLocks.releaseLock(); - for (ReferenceCounted previousOmSnapshot : previousOmSnapshots) { - IOUtils.close(LOG, previousOmSnapshot); - } + IOUtils.close(LOG, previousOmSnapshots); previousOmSnapshots.clear(); previousSnapshotInfos.clear(); } @@ -223,12 +219,10 @@ protected OzoneManager getOzoneManager() { return ozoneManager; } - @VisibleForTesting List getPreviousSnapshotInfos() { return previousSnapshotInfos; } - @VisibleForTesting List> getPreviousOmSnapshots() { return previousOmSnapshots; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java index 741f1d30c36e..3955f8f6a8eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -117,7 +117,7 @@ void testReleaseLock() throws Exception { } @Test - void testAcquireLockWhenAlreadyAcquiredThrowsException() throws Exception { + void testAcquireLockWhenLockIsAlreadyAcquired() throws Exception { List objects = Collections.singletonList(obj1); OMLockDetails mockLockDetails = mock(OMLockDetails.class); when(mockLockDetails.isLockAcquired()).thenReturn(true); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index e4c586670ca0..b510bd67897f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -143,9 +143,9 @@ public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSn IOException ex = assertThrows(IOException.class, () -> testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true)); - 
assertEquals("Volume & Bucket name for snapshot : " - + currentSnapshotInfo + " not matching for key in volume: " + volume - + " bucket: " + bucket, ex.getMessage()); + assertEquals("Volume and Bucket name for snapshot : " + + currentSnapshotInfo + " do not match against the volume: " + volume + + " and bucket: " + bucket + " of the key.", ex.getMessage()); } volume.set(getVolumes().get(3)); bucket.set(getBuckets().get(2)); @@ -158,9 +158,9 @@ public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSn IOException ex = assertThrows(IOException.class, () -> testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true)); - assertEquals("Volume & Bucket name for snapshot : " - + currentSnapshotInfo + " not matching for key in volume: " + volume - + " bucket: " + bucket, ex.getMessage()); + assertEquals("Volume and Bucket name for snapshot : " + + currentSnapshotInfo + " do not match against the volume: " + volume + + " and bucket: " + bucket + " of the key.", ex.getMessage()); } } From abcfaffc48e13741d003bfd05c8a2766d0e7d077 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 1 Apr 2025 12:05:06 -0700 Subject: [PATCH 09/24] HDDS-12559. Move acquireLock to another private function Change-Id: I53fa063a7268e23870b3a7a6851e9920b81ed241 --- .../ozone/om/lock/OzoneManagerLock.java | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 2f3e2c939937..8e5841cfadd2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -232,6 +232,17 @@ public OMLockDetails acquireWriteLocks(Resource resource, Collection k return acquireLocks(resource, false, keys); } + private void acquireLock(Resource resource, boolean isReadLock, ReadWriteLock lock, + long startWaitingTimeNanos) { + if (isReadLock) { + lock.readLock().lock(); + updateReadLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); + } else { + lock.writeLock().lock(); + updateWriteLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); + } + } + private OMLockDetails acquireLocks(Resource resource, boolean isReadLock, Collection keys) { omLockDetails.get().clear(); @@ -244,13 +255,7 @@ private OMLockDetails acquireLocks(Resource resource, boolean isReadLock, long startWaitingTimeNanos = Time.monotonicNowNanos(); for (ReadWriteLock lock : bulkGetLock(resource, keys)) { - if (isReadLock) { - lock.readLock().lock(); - updateReadLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); - } else { - lock.writeLock().lock(); - updateWriteLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); - } + acquireLock(resource, isReadLock, lock, startWaitingTimeNanos); } lockSet.set(resource.setLock(lockSet.get())); @@ -270,13 +275,7 @@ private OMLockDetails acquireLock(Resource resource, boolean isReadLock, long startWaitingTimeNanos = Time.monotonicNowNanos(); ReentrantReadWriteLock lock = getLock(resource, keys); - if (isReadLock) { - lock.readLock().lock(); - updateReadLockMetrics(resource, lock, startWaitingTimeNanos); - } else { - lock.writeLock().lock(); - updateWriteLockMetrics(resource, lock, startWaitingTimeNanos); - } + 
acquireLock(resource, isReadLock, lock, startWaitingTimeNanos); lockSet.set(resource.setLock(lockSet.get())); omLockDetails.get().setLockAcquired(true); From a2127a47a3512851866abbebf74e2a012186fdcc Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 1 Apr 2025 12:50:42 -0700 Subject: [PATCH 10/24] HDDS-12560. Address review comments Change-Id: I298fc6d96335b5cb9c79f46901f0d89dc2829362 --- .../apache/hadoop/ozone/util/CheckedFunction.java | 1 - .../ozone/om/snapshot/filter/ReclaimableFilter.java | 13 +++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java index 0d565cbf3f7a..d159dea8ba09 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.util; /** - * * Represents a function that accepts one argument and produces a result. * This is a functional interface whose functional method is apply(Object). * Type parameters: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 9bebfb6a6958..33951608a0cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -69,10 +69,15 @@ public abstract class ReclaimableFilter implements CheckedFunction Date: Wed, 2 Apr 2025 00:20:06 -0700 Subject: [PATCH 11/24] HDDS-12559. 
Address review comments Change-Id: I9c80b73b6b72d7214edb3e92ae32799a4308f223 --- .../org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java | 2 +- .../apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 8e5841cfadd2..9fd567344cd0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -233,7 +233,7 @@ public OMLockDetails acquireWriteLocks(Resource resource, Collection k } private void acquireLock(Resource resource, boolean isReadLock, ReadWriteLock lock, - long startWaitingTimeNanos) { + long startWaitingTimeNanos) { if (isReadLock) { lock.readLock().lock(); updateReadLockMetrics(resource, (ReentrantReadWriteLock) lock, startWaitingTimeNanos); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index 3e141c0e9ff4..aac3b097b22e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -49,8 +49,9 @@ public MultiSnapshotLocks(IOzoneManagerLock lock, OzoneManagerLock.Resource reso public synchronized OMLockDetails acquireLock(Collection ids) throws OMException { if (this.lockDetails.isLockAcquired()) { - throw new OMException("More locks cannot be acquired when locks have been already acquired. Locks acquired : [" - + objectLocks.stream().map(Arrays::toString).collect(Collectors.joining(",")) + "]", + throw new OMException( + objectLocks.stream().map(Arrays::toString).collect(Collectors.joining(",", + "More locks cannot be acquired when locks have been already acquired. Locks acquired : [", "]")), OMException.ResultCodes.INTERNAL_ERROR); } List keys = From 9d6bae3a4f09b00bb10e461a4f5f7f9b66be04d0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 2 Apr 2025 20:37:42 -0700 Subject: [PATCH 12/24] HDDS-12560. 
Address review comments Change-Id: I20a877c63a968d9bf912e12cfd10c7aebc7a25bb --- .../om/snapshot/filter/ReclaimableFilter.java | 91 +++++++++++-------- 1 file changed, 52 insertions(+), 39 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 33951608a0cc..a8ac39bbe3b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -17,14 +17,13 @@ package org.apache.hadoop.ozone.om.snapshot.filter; -import com.google.common.collect.Lists; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.UUID; -import java.util.stream.Collectors; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.KeyManager; @@ -56,7 +55,8 @@ public abstract class ReclaimableFilter implements CheckedFunction tmpValidationSnapshotInfos; + private final List lockedSnapshotIds; private final List previousSnapshotInfos; private final List> previousOmSnapshots; private final MultiSnapshotLocks snapshotIdLocks; @@ -93,6 +93,9 @@ public ReclaimableFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshot this.numberOfPreviousSnapshotsFromChain = numberOfPreviousSnapshotsFromChain; this.previousOmSnapshots = new ArrayList<>(numberOfPreviousSnapshotsFromChain); this.previousSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); + // Used for tmp list to avoid lots of garbage collection of list. + this.tmpValidationSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); + this.lockedSnapshotIds = new ArrayList<>(numberOfPreviousSnapshotsFromChain + 1); } private List getLastNSnapshotInChain(String volume, String bucket) throws IOException { @@ -101,36 +104,42 @@ private List getLastNSnapshotInChain(String volume, String bucket) throw new IOException("Volume and Bucket name for snapshot : " + currentSnapshotInfo + " do not match " + "against the volume: " + volume + " and bucket: " + bucket + " of the key."); } + tmpValidationSnapshotInfos.clear(); SnapshotInfo expectedPreviousSnapshotInfo = currentSnapshotInfo == null ? SnapshotUtils.getLatestSnapshotInfo(volume, bucket, ozoneManager, snapshotChainManager) : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, currentSnapshotInfo); - List snapshotInfos = Lists.newArrayList(); SnapshotInfo snapshotInfo = expectedPreviousSnapshotInfo; - while (snapshotInfos.size() < numberOfPreviousSnapshotsFromChain) { + while (tmpValidationSnapshotInfos.size() < numberOfPreviousSnapshotsFromChain) { // If changes made to the snapshot have not been flushed to disk, throw exception immediately, next run of // garbage collection would process the snapshot. if (!OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), snapshotInfo)) { throw new IOException("Changes made to the snapshot " + snapshotInfo + " have not been flushed to the disk "); } - snapshotInfos.add(snapshotInfo); + tmpValidationSnapshotInfos.add(snapshotInfo); snapshotInfo = snapshotInfo == null ? 
null : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, snapshotInfo); } // Reversing list to get the correct order in chain. To ensure locking order is as per the chain ordering. - Collections.reverse(snapshotInfos); - return snapshotInfos; + Collections.reverse(tmpValidationSnapshotInfos); + return tmpValidationSnapshotInfos; } private boolean validateExistingLastNSnapshotsInChain(String volume, String bucket) throws IOException { List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); - List expectedSnapshotIds = expectedLastNSnapshotsInChain.stream() - .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) - .collect(Collectors.toList()); - List existingSnapshotIds = previousOmSnapshots.stream() - .map(omSnapshotReferenceCounted -> omSnapshotReferenceCounted == null ? null : - omSnapshotReferenceCounted.get().getSnapshotID()).collect(Collectors.toList()); - return expectedSnapshotIds.equals(existingSnapshotIds); + if (expectedLastNSnapshotsInChain.size() != previousOmSnapshots.size()) { + return false; + } + for (int i = 0; i < expectedLastNSnapshotsInChain.size(); i++) { + SnapshotInfo snapshotInfo = expectedLastNSnapshotsInChain.get(i); + ReferenceCounted omSnapshot = previousOmSnapshots.get(i); + UUID snapshotId = snapshotInfo == null ? null : snapshotInfo.getSnapshotId(); + UUID existingOmSnapshotId = omSnapshot == null ? null : omSnapshot.get().getSnapshotID(); + if (!Objects.equals(snapshotId, existingOmSnapshotId)) { + return false; + } + } + return true; } // Initialize the last N snapshots in the chain by acquiring locks. Throw IOException if it fails. @@ -139,35 +148,34 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) try { // Acquire lock on last N snapshot & current snapshot(AOS if it is null). List expectedLastNSnapshotsInChain = getLastNSnapshotInChain(volume, bucket); - List lockIds = expectedLastNSnapshotsInChain.stream() - .map(snapshotInfo -> snapshotInfo == null ? null : snapshotInfo.getSnapshotId()) - .collect(Collectors.toList()); + for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { + lockedSnapshotIds.add(snapshotInfo == null ? null : snapshotInfo.getSnapshotId()); + } //currentSnapshotInfo for AOS will be null. - lockIds.add(currentSnapshotInfo == null ? null : currentSnapshotInfo.getSnapshotId()); - - if (snapshotIdLocks.acquireLock(lockIds).isLockAcquired()) { - for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { - if (snapshotInfo != null) { - // Fail operation if any of the previous snapshots are not active. - previousOmSnapshots.add(omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), - snapshotInfo.getBucketName(), snapshotInfo.getName())); - previousSnapshotInfos.add(snapshotInfo); - } else { - previousOmSnapshots.add(null); - previousSnapshotInfos.add(null); - } - - // NOTE: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames - // support. - bucketInfo = ozoneManager.getBucketInfo(volume, bucket); - volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); - } - } else { + lockedSnapshotIds.add(currentSnapshotInfo == null ? 
null : currentSnapshotInfo.getSnapshotId()); + + if (!snapshotIdLocks.acquireLock(lockedSnapshotIds).isLockAcquired()) { throw new IOException("Lock acquisition failed for last N snapshots: " + expectedLastNSnapshotsInChain + ", " + currentSnapshotInfo); } + for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { + if (snapshotInfo != null) { + // Fail operation if any of the previous snapshots are not active. + previousOmSnapshots.add(omSnapshotManager.getActiveSnapshot(snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), snapshotInfo.getName())); + previousSnapshotInfos.add(snapshotInfo); + } else { + previousOmSnapshots.add(null); + previousSnapshotInfos.add(null); + } + + // NOTE: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames + // support. + bucketInfo = ozoneManager.getBucketInfo(volume, bucket); + volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); + } } catch (IOException e) { - this.close(); + this.cleanup(); throw e; } } @@ -194,10 +202,15 @@ public synchronized Boolean apply(Table.KeyValue keyValue) throws IOE @Override public void close() throws IOException { + this.cleanup(); + } + + private void cleanup() { this.snapshotIdLocks.releaseLock(); IOUtils.close(LOG, previousOmSnapshots); previousOmSnapshots.clear(); previousSnapshotInfos.clear(); + lockedSnapshotIds.clear(); } protected ReferenceCounted getPreviousOmSnapshot(int index) { From 93ba939a51b586248363c75efafef44342b9f588 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 17 Apr 2025 08:23:55 -0700 Subject: [PATCH 13/24] HDDS-12560. Address review comments Change-Id: I4e57e43596a9d90ecb2dad62883b746d8cdf992e --- .../om/snapshot/filter/TestReclaimableFilter.java | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index b510bd67897f..7c94d33689b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -81,6 +81,12 @@ protected Boolean isReclaimable(Table.KeyValue keyValue) throws }; } + /** + * Method for creating arguments for paramatrized tests requiring arguments in the following order: + * numberOfPreviousSnapshotsFromChain: Number of previous snapshots in the chain. + * actualNumberOfSnapshots: Total number of snapshots in the chain. + * index: Index of snapshot in the chain for testing. If index > actualNumberOfSnapshots test case will run for AOS. 
+ */ List testReclaimableFilterArguments() { List arguments = new ArrayList<>(); for (int i = 0; i < 3; i++) { @@ -112,9 +118,9 @@ private void testSnapshotInitAndLocking(String volume, String bucket, int number @ParameterizedTest @MethodSource("testReclaimableFilterArguments") - public void testReclaimableFilterSnapshotChainInitilization(int numberOfPreviousSnapshotsFromChain, - int actualNumberOfSnapshots, - int index) throws IOException, RocksDBException { + public void testReclaimableFilterSnapshotChainInitialization(int numberOfPreviousSnapshotsFromChain, + int actualNumberOfSnapshots, + int index) throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 2); String volume = getVolumes().get(3); From 3935bf8e5b599d386f1a931f534084f481124b69 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran <47532440+swamirishi@users.noreply.github.com> Date: Thu, 17 Apr 2025 08:24:50 -0700 Subject: [PATCH 14/24] Update hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java Co-authored-by: Doroszlai, Attila <6454655+adoroszlai@users.noreply.github.com> --- .../ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java index 783af6151e8a..0caa2d8df251 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java @@ -83,7 +83,7 @@ * Test class for ReclaimableFilter. */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -public abstract class TestAbstractReclaimableFilter { +public abstract class AbstractReclaimableFilterTest { private ReclaimableFilter reclaimableFilter; private OzoneManager ozoneManager; From 6d638a0c9e0cf6abd7337b708541993de3fc89b4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 17 Apr 2025 08:29:32 -0700 Subject: [PATCH 15/24] HDDS-12560. 
Fix method folding Change-Id: I7114755259f35362cc7a5db527663e39e38c8b7a --- .../filter/TestAbstractReclaimableFilter.java | 32 +++++++------ .../filter/TestReclaimableFilter.java | 45 +++++++++---------- 2 files changed, 36 insertions(+), 41 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java index 783af6151e8a..bd3a00a55801 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java @@ -98,30 +98,28 @@ public abstract class TestAbstractReclaimableFilter { private SnapshotChainManager snapshotChainManager; private KeyManager keyManager; - protected abstract ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, - SnapshotChainManager chainManager, - SnapshotInfo currentSnapshotInfo, KeyManager km, - IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain); + protected abstract ReclaimableFilter initializeFilter( + OzoneManager om, OmSnapshotManager snapshotManager, SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain); - protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, - int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, - int numberOfBucketsPerVolume) throws RocksDBException, IOException { + protected SnapshotInfo setup( + int numberOfPreviousSnapshotsFromChain, int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume) throws RocksDBException, IOException { return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, numberOfBucketsPerVolume, (info) -> info, BucketLayout.FILE_SYSTEM_OPTIMIZED); } - protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, - int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, - int numberOfBucketsPerVolume, BucketLayout bucketLayout) - throws RocksDBException, IOException { + protected SnapshotInfo setup( + int numberOfPreviousSnapshotsFromChain, int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, BucketLayout bucketLayout) throws RocksDBException, IOException { return setup(numberOfPreviousSnapshotsFromChain, actualTotalNumberOfSnapshotsInChain, index, numberOfVolumes, numberOfBucketsPerVolume, (info) -> info, bucketLayout); } - protected SnapshotInfo setup(int numberOfPreviousSnapshotsFromChain, - int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, - int numberOfBucketsPerVolume, Function snapshotProps, - BucketLayout bucketLayout) throws IOException, RocksDBException { + protected SnapshotInfo setup( + int numberOfPreviousSnapshotsFromChain, int actualTotalNumberOfSnapshotsInChain, int index, int numberOfVolumes, + int numberOfBucketsPerVolume, Function snapshotProps, + BucketLayout bucketLayout) throws IOException, RocksDBException { this.ozoneManager = mock(OzoneManager.class); this.snapshotChainManager = mock(SnapshotChainManager.class); this.keyManager = mock(KeyManager.class); @@ -231,8 +229,8 @@ private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, 
IOE } } - protected List getLastSnapshotInfos(String volume, String bucket, int numberOfSnapshotsInChain, - int index) { + protected List getLastSnapshotInfos( + String volume, String bucket, int numberOfSnapshotsInChain, int index) { List infos = getSnapshotInfos().get(getKey(volume, bucket)); int endIndex = Math.min(index - 1, infos.size() - 1); return IntStream.range(endIndex - numberOfSnapshotsInChain + 1, endIndex + 1).mapToObj(i -> i >= 0 ? diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index 7c94d33689b9..b62eafb648e5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -58,10 +58,9 @@ public class TestReclaimableFilter extends TestAbstractReclaimableFilter { @Override - protected ReclaimableFilter initializeFilter(OzoneManager om, OmSnapshotManager snapshotManager, - SnapshotChainManager chainManager, - SnapshotInfo currentSnapshotInfo, KeyManager km, - IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { + protected ReclaimableFilter initializeFilter( + OzoneManager om, OmSnapshotManager snapshotManager, SnapshotChainManager chainManager, + SnapshotInfo currentSnapshotInfo, KeyManager km, IOzoneManagerLock lock, int numberOfPreviousSnapshotsFromChain) { return new ReclaimableFilter(om, snapshotManager, chainManager, currentSnapshotInfo, km, lock, numberOfPreviousSnapshotsFromChain) { @Override @@ -99,9 +98,9 @@ List testReclaimableFilterArguments() { return arguments; } - private void testSnapshotInitAndLocking(String volume, String bucket, int numberOfPreviousSnapshotsFromChain, - int index, SnapshotInfo currentSnapshotInfo, Boolean reclaimable, - Boolean expectedReturnValue) throws IOException { + private void testSnapshotInitAndLocking( + String volume, String bucket, int numberOfPreviousSnapshotsFromChain, int index, SnapshotInfo currentSnapshotInfo, + Boolean reclaimable, Boolean expectedReturnValue) throws IOException { List infos = getLastSnapshotInfos(volume, bucket, numberOfPreviousSnapshotsFromChain, index); assertEquals(expectedReturnValue, getReclaimableFilter().apply(Table.newKeyValue(getKey(volume, bucket), reclaimable))); @@ -118,9 +117,9 @@ private void testSnapshotInitAndLocking(String volume, String bucket, int number @ParameterizedTest @MethodSource("testReclaimableFilterArguments") - public void testReclaimableFilterSnapshotChainInitialization(int numberOfPreviousSnapshotsFromChain, - int actualNumberOfSnapshots, - int index) throws IOException, RocksDBException { + public void testReclaimableFilterSnapshotChainInitialization( + int numberOfPreviousSnapshotsFromChain, int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 2); String volume = getVolumes().get(3); @@ -133,9 +132,9 @@ public void testReclaimableFilterSnapshotChainInitialization(int numberOfPreviou @ParameterizedTest @MethodSource("testReclaimableFilterArguments") - public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSnapshotsFromChain, - int actualNumberOfSnapshots, - int index) throws IOException, RocksDBException { + public void 
testReclaimableFilterWithBucketVolumeMismatch( + int numberOfPreviousSnapshotsFromChain, int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); AtomicReference volume = new AtomicReference<>(getVolumes().get(2)); @@ -172,9 +171,9 @@ public void testReclaimableFilterWithBucketVolumeMismatch(int numberOfPreviousSn @ParameterizedTest @MethodSource("testReclaimableFilterArguments") - public void testReclaimabilityOnSnapshotAddition(int numberOfPreviousSnapshotsFromChain, - int actualNumberOfSnapshots, - int index) throws IOException, RocksDBException { + public void testReclaimabilityOnSnapshotAddition( + int numberOfPreviousSnapshotsFromChain, int actualNumberOfSnapshots, int index) + throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4); @@ -221,10 +220,9 @@ List testInvalidSnapshotArgs() { @ParameterizedTest @MethodSource("testInvalidSnapshotArgs") - public void testInitWithInactiveSnapshots(int numberOfPreviousSnapshotsFromChain, - int actualNumberOfSnapshots, - int index, - int snapIndex) throws IOException, RocksDBException { + public void testInitWithInactiveSnapshots( + int numberOfPreviousSnapshotsFromChain, int actualNumberOfSnapshots, int index, int snapIndex) + throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 1, 1, (snapshotInfo) -> { if (snapshotInfo.getVolumeName().equals(getVolumes().get(0)) && @@ -256,10 +254,9 @@ public void testInitWithInactiveSnapshots(int numberOfPreviousSnapshotsFromChain @ParameterizedTest @MethodSource("testInvalidSnapshotArgs") - public void testInitWithUnflushedSnapshots(int numberOfPreviousSnapshotsFromChain, - int actualNumberOfSnapshots, - int index, - int snapIndex) throws IOException, RocksDBException { + public void testInitWithUnflushedSnapshots( + int numberOfPreviousSnapshotsFromChain, int actualNumberOfSnapshots, int index, + int snapIndex) throws IOException, RocksDBException { SnapshotInfo currentSnapshotInfo = setup(numberOfPreviousSnapshotsFromChain, actualNumberOfSnapshots, index, 4, 4, (snapshotInfo) -> { if (snapshotInfo.getVolumeName().equals(getVolumes().get(3)) && From 8fd746ccb5a2e42b7c4deb9cfbe1a1e54a2cedcf Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 17 Apr 2025 08:33:13 -0700 Subject: [PATCH 16/24] HDDS-12560. Remove Checked Function Change-Id: If5159bfd6cb61e3c8ab00e8322de290d27d41940 --- .../hadoop/ozone/util/CheckedFunction.java | 30 ------------------- .../om/snapshot/filter/ReclaimableFilter.java | 11 ++++--- 2 files changed, 5 insertions(+), 36 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java deleted file mode 100644 index d159dea8ba09..000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/CheckedFunction.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.util; - -/** - * Represents a function that accepts one argument and produces a result. - * This is a functional interface whose functional method is apply(Object). - * Type parameters: - * – the type of the input to the function - * – the type of the result of the function - * - the type of exception thrown. - */ -public interface CheckedFunction { - R apply(T t) throws E; -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index a8ac39bbe3b0..49b50f46f3ca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -38,7 +38,7 @@ import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.hadoop.ozone.util.CheckedFunction; +import org.apache.ratis.util.function.CheckedFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,11 +79,10 @@ public abstract class ReclaimableFilter implements CheckedFunction Date: Thu, 17 Apr 2025 08:43:17 -0700 Subject: [PATCH 17/24] Update hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java Co-authored-by: Doroszlai, Attila <6454655+adoroszlai@users.noreply.github.com> --- .../hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 49b50f46f3ca..ea89902965a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -46,8 +46,8 @@ * This class is responsible for opening last N snapshot given a snapshot metadata manager or AOS metadata manager by * acquiring a lock. */ -public abstract class ReclaimableFilter implements CheckedFunction, - Boolean, IOException>, Closeable { +public abstract class ReclaimableFilter + implements CheckedFunction, Boolean, IOException>, Closeable { private static final Logger LOG = LoggerFactory.getLogger(ReclaimableFilter.class); From f8a2b6ea88babe4080948a3d36d9a551e68c7eee Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 17 Apr 2025 08:48:27 -0700 Subject: [PATCH 18/24] HDDS-12560. 
Fix compilation issue Change-Id: I864704aa4ef0140eb2d3005795cfeb89c721246d --- ...eclaimableFilter.java => AbstractReclaimableFilterTest.java} | 0 .../hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/{TestAbstractReclaimableFilter.java => AbstractReclaimableFilterTest.java} (100%) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java similarity index 100% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestAbstractReclaimableFilter.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index b62eafb648e5..dc0fe9daf6c3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -55,7 +55,7 @@ * Test class for ReclaimableFilter testing general initializing of snapshot chain. */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -public class TestReclaimableFilter extends TestAbstractReclaimableFilter { +public class TestReclaimableFilter extends AbstractReclaimableFilterTest { @Override protected ReclaimableFilter initializeFilter( From 29a26d211460a69dd4a0ad2b02dcaef89d850c47 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 30 Apr 2025 13:57:10 -0400 Subject: [PATCH 19/24] HDDS-12560. Address review comments Change-Id: I443fd757524cfcd35780f06631f06112e33b0f04 --- .../om/snapshot/filter/ReclaimableFilter.java | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index ea89902965a4..ec8cd0d1100e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -55,6 +55,7 @@ public abstract class ReclaimableFilter private final SnapshotInfo currentSnapshotInfo; private final OmSnapshotManager omSnapshotManager; private final SnapshotChainManager snapshotChainManager; + // Used for tmp list to avoid lots of garbage collection of list. private final List tmpValidationSnapshotInfos; private final List lockedSnapshotIds; private final List previousSnapshotInfos; @@ -73,10 +74,10 @@ public abstract class ReclaimableFilter * @param omSnapshotManager : OmSnapshot Manager of OM instance. * @param snapshotChainManager : snapshot chain manager of OM instance. * @param currentSnapshotInfo : If null the deleted keys in Active Metadata manager needs to be processed, hence the - * latest snapshot in the snapshot chain corresponding to bucket key needs to be - * processed. 
+ * the reference for the key in the latest snapshot in the snapshot chain needs to be + * checked. * @param keyManager : KeyManager corresponding to snapshot or Active Metadata Manager. - * @param lock : Lock for Active OM. + * @param lock : Lock Manager for Active OM. * @param numberOfPreviousSnapshotsFromChain : number of previous snapshots to be initialized. */ public ReclaimableFilter( @@ -92,7 +93,6 @@ public ReclaimableFilter( this.numberOfPreviousSnapshotsFromChain = numberOfPreviousSnapshotsFromChain; this.previousOmSnapshots = new ArrayList<>(numberOfPreviousSnapshotsFromChain); this.previousSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); - // Used for tmp list to avoid lots of garbage collection of list. this.tmpValidationSnapshotInfos = new ArrayList<>(numberOfPreviousSnapshotsFromChain); this.lockedSnapshotIds = new ArrayList<>(numberOfPreviousSnapshotsFromChain + 1); } @@ -104,15 +104,14 @@ private List getLastNSnapshotInChain(String volume, String bucket) "against the volume: " + volume + " and bucket: " + bucket + " of the key."); } tmpValidationSnapshotInfos.clear(); - SnapshotInfo expectedPreviousSnapshotInfo = currentSnapshotInfo == null + SnapshotInfo snapshotInfo = currentSnapshotInfo == null ? SnapshotUtils.getLatestSnapshotInfo(volume, bucket, ozoneManager, snapshotChainManager) : SnapshotUtils.getPreviousSnapshot(ozoneManager, snapshotChainManager, currentSnapshotInfo); - SnapshotInfo snapshotInfo = expectedPreviousSnapshotInfo; while (tmpValidationSnapshotInfos.size() < numberOfPreviousSnapshotsFromChain) { - // If changes made to the snapshot have not been flushed to disk, throw exception immediately, next run of - // garbage collection would process the snapshot. + // If changes made to the snapshot have not been flushed to disk, throw exception immediately. + // Next run of garbage collection would process the snapshot. if (!OmSnapshotManager.areSnapshotChangesFlushedToDB(ozoneManager.getMetadataManager(), snapshotInfo)) { - throw new IOException("Changes made to the snapshot " + snapshotInfo + " have not been flushed to the disk "); + throw new IOException("Changes made to the snapshot: " + snapshotInfo + " have not been flushed to the disk."); } tmpValidationSnapshotInfos.add(snapshotInfo); snapshotInfo = snapshotInfo == null ? null @@ -150,7 +149,7 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) for (SnapshotInfo snapshotInfo : expectedLastNSnapshotsInChain) { lockedSnapshotIds.add(snapshotInfo == null ? null : snapshotInfo.getSnapshotId()); } - //currentSnapshotInfo for AOS will be null. + // currentSnapshotInfo will be null for AOS. lockedSnapshotIds.add(currentSnapshotInfo == null ? null : currentSnapshotInfo.getSnapshotId()); if (!snapshotIdLocks.acquireLock(lockedSnapshotIds).isLockAcquired()) { @@ -168,8 +167,8 @@ private void initializePreviousSnapshotsFromChain(String volume, String bucket) previousSnapshotInfos.add(null); } - // NOTE: Getting volumeId and bucket from active OM. This would be wrong on volume & bucket renames - // support. + // NOTE: Getting volumeId and bucket from active OM. + // This would be wrong on volume & bucket renames support. bucketInfo = ozoneManager.getBucketInfo(volume, bucket); volumeId = ozoneManager.getMetadataManager().getVolumeId(volume); } From 1ebba73ef40a865779c881b895cebe729f900b78 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 30 Apr 2025 15:23:44 -0400 Subject: [PATCH 20/24] HDDS-12560. 
Fix test case Change-Id: Ic1c1920826f5462cb38d36075cffa498814c96c7 --- .../hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java index dc0fe9daf6c3..9fdc87439159 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableFilter.java @@ -284,7 +284,7 @@ public void testInitWithUnflushedSnapshots( IOException ex = assertThrows(IOException.class, () -> testSnapshotInitAndLocking(volume.get(), bucket.get(), numberOfPreviousSnapshotsFromChain, index, currentSnapshotInfo, true, true)); - assertEquals(String.format("Changes made to the snapshot %s have not been flushed to the disk ", + assertEquals(String.format("Changes made to the snapshot: %s have not been flushed to the disk.", getSnapshotInfos().get(getKey(volume.get(), bucket.get())).get(snapIndex)), ex.getMessage()); } } From 3e06491a7a54992a8986c7f55306f0879951b5d4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 5 May 2025 14:53:43 -0400 Subject: [PATCH 21/24] HDDS-12562. Fix javadoc and comments Change-Id: Ib2287ade3dfe9c186543974f79cd0e9dc353ff33 --- .../ozone/om/snapshot/filter/ReclaimableDirFilter.java | 6 +----- .../om/snapshot/filter/ReclaimableRenameEntryFilter.java | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java index 1123c6a2df73..7384ed88a9b1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableDirFilter.java @@ -32,15 +32,11 @@ import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; /** - * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in + * Class to filter out deleted directories which are reclaimable based on their presence in previous snapshot in * the snapshot chain. */ public class ReclaimableDirFilter extends ReclaimableFilter { - /** - * Filter to return deleted directories which are reclaimable based on their presence in previous snapshot in - * the snapshot chain. 
- */ public ReclaimableDirFilter(OzoneManager ozoneManager, OmSnapshotManager omSnapshotManager, SnapshotChainManager snapshotChainManager, SnapshotInfo currentSnapshotInfo, KeyManager keyManager, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java index 3751855e9006..563f71d81f4c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableRenameEntryFilter.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; /** - * Filter to return rename table entries which are reclaimable based on the key presence in previous snapshot's + * Class to filter out rename table entries which are reclaimable based on the key presence in previous snapshot's * keyTable/DirectoryTable in the snapshot chain. */ public class ReclaimableRenameEntryFilter extends ReclaimableFilter { From 45cb588007cec4526237be65891c0c81a0a16906 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 5 May 2025 14:53:59 -0400 Subject: [PATCH 22/24] HDDS-12562. Fix javadoc and comments Change-Id: Id8a91c6d076abb39b2053ec60c9daadcfb334265 --- .../src/main/java/org/apache/hadoop/ozone/om/KeyManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 80dbfdf3c6ce..268db5b75188 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -138,13 +138,13 @@ List> getRenamesKeyEntries( /** - * Returns the previous snapshot's ozone keyInfo corresponding for the object. + * Returns the previous snapshot's ozone directorInfo corresponding for the object. */ CheckedFunction getPreviousSnapshotOzoneDirInfo( long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo directoryInfo) throws IOException; /** - * Returns the previous snapshot's ozone keyInfo corresponding for the object. + * Returns the previous snapshot's ozone directoryInfo corresponding for the object. */ CheckedFunction getPreviousSnapshotOzoneDirInfo( long volumeId, OmBucketInfo bucketInfo, OmKeyInfo directoryInfo) throws IOException; From 27b360492f001674e12e007fe44b41f781851844 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 5 May 2025 23:20:28 -0400 Subject: [PATCH 23/24] HDDS-12562. 
Fix tests Change-Id: I6afcf9eb40aa63de50a6c789b1eabd01249d152f --- .../hdds/utils/db/InMemoryTestTable.java | 12 ++++++- .../hadoop/ozone/om/TestKeyManagerImpl.java | 33 ++++++++----------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java index 0432c7290635..62e15a08662b 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -28,7 +29,16 @@ * InMemory Table implementation for tests. */ public final class InMemoryTestTable implements Table { - private final Map map = new ConcurrentHashMap<>(); + private final Map map; + + public InMemoryTestTable() { + this(Collections.emptyMap()); + } + + public InMemoryTestTable(Map map) { + this.map = new ConcurrentHashMap<>(); + this.map.putAll(map); + } @Override public void close() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 7023d2e21c1e..046ed87abdd9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -106,6 +106,7 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.utils.db.InMemoryTestTable; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -1589,19 +1590,12 @@ void testGetNotExistedPart() throws IOException { assertEquals(0, locationList.size()); } - private Table getMockedTable(Map map) throws IOException { - Table table = mock(Table.class); - when(table.get(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); - when(table.getIfExist(anyString())).thenAnswer(i -> map.get(i.getArgument(0))); - return table; - } - private OmKeyInfo getMockedOmKeyInfo(OmBucketInfo bucketInfo, long parentId, String key, long objectId) { OmKeyInfo omKeyInfo = mock(OmKeyInfo.class); if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { when(omKeyInfo.getFileName()).thenReturn(key); - } else { when(omKeyInfo.getParentObjectID()).thenReturn(parentId); + } else { when(omKeyInfo.getKeyName()).thenReturn(key); } when(omKeyInfo.getObjectID()).thenReturn(objectId); @@ -1612,12 +1606,11 @@ private OmDirectoryInfo getMockedOmDirInfo(long parentId, String key, long objec OmDirectoryInfo omKeyInfo = mock(OmDirectoryInfo.class); when(omKeyInfo.getName()).thenReturn(key); when(omKeyInfo.getParentObjectID()).thenReturn(parentId); - when(omKeyInfo.getParentObjectID()).thenReturn(0L); when(omKeyInfo.getObjectID()).thenReturn(objectId); return omKeyInfo; } - private String getPath(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyInfo) { + private String getDirectoryKey(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyInfo) { 
if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { return volumeId + "/" + bucketInfo.getObjectID() + "/" + omKeyInfo.getParentObjectID() + "/" + omKeyInfo.getFileName(); @@ -1626,7 +1619,7 @@ private String getPath(long volumeId, OmBucketInfo bucketInfo, OmKeyInfo omKeyIn } } - private String getPath(long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo omDirInfo) { + private String getDirectoryKey(long volumeId, OmBucketInfo bucketInfo, OmDirectoryInfo omDirInfo) { return volumeId + "/" + bucketInfo.getObjectID() + "/" + omDirInfo.getParentObjectID() + "/" + omDirInfo.getName(); } @@ -1653,7 +1646,7 @@ public void testPreviousSnapshotOzoneDirInfo() throws IOException { .setObjectID(2L).setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED).build(); OmDirectoryInfo prevKey = getMockedOmDirInfo(5, "key", 1); OmDirectoryInfo prevKey2 = getMockedOmDirInfo(7, "key2", 2); - OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); + OmKeyInfo currentKey = getMockedOmKeyInfo(bucketInfo, 6, "renamedKey", 1); OmDirectoryInfo currentKeyDir = getMockedOmDirInfo(6, "renamedKey", 1); OmKeyInfo currentKey2 = getMockedOmKeyInfo(bucketInfo, 7, "key2", 2); OmDirectoryInfo currentKeyDir2 = getMockedOmDirInfo(7, "key2", 2); @@ -1661,14 +1654,14 @@ public void testPreviousSnapshotOzoneDirInfo() throws IOException { OmDirectoryInfo currentKeyDir3 = getMockedOmDirInfo(8, "key3", 3); OmKeyInfo currentKey4 = getMockedOmKeyInfo(bucketInfo, 8, "key4", 4); OmDirectoryInfo currentKeyDir4 = getMockedOmDirInfo(8, "key4", 4); - Table prevDirTable = - getMockedTable(ImmutableMap.of( - getPath(volumeId, bucketInfo, prevKey), prevKey, - getPath(volumeId, bucketInfo, prevKey2), prevKey2)); - Table renameTable = getMockedTable( - ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), getPath(volumeId, bucketInfo, prevKey), - getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getPath(volumeId, bucketInfo, - getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); + Table prevDirTable = new InMemoryTestTable<>( + ImmutableMap.of(getDirectoryKey(volumeId, bucketInfo, prevKey), prevKey, + getDirectoryKey(volumeId, bucketInfo, prevKey2), prevKey2)); + Table renameTable = new InMemoryTestTable<>( + ImmutableMap.of(getRenameKey(VOLUME_NAME, BUCKET_NAME, 1), + getDirectoryKey(volumeId, bucketInfo, prevKey), + getRenameKey(VOLUME_NAME, BUCKET_NAME, 3), getDirectoryKey(volumeId, bucketInfo, + getMockedOmKeyInfo(bucketInfo, 6, "unknownKey", 9)))); when(previousMetadataManager.getDirectoryTable()).thenReturn(prevDirTable); when(omMetadataManager.getSnapshotRenamedTable()).thenReturn(renameTable); assertEquals(prevKey, km.getPreviousSnapshotOzoneDirInfo(volumeId, bucketInfo, currentKey).apply(prevKM)); From b47f9864f7829a11fd28c819048eee0a477da00b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 5 May 2025 23:40:03 -0400 Subject: [PATCH 24/24] HDDS-12562. 
Add assert Change-Id: Ie8c453c357392c3ed6990aa04a21ef74ce62b642 --- .../ozone/om/snapshot/filter/TestReclaimableDirFilter.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java index 600e1519e6c4..a8c6a11b2eca 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/TestReclaimableDirFilter.java @@ -70,6 +70,7 @@ private void testReclaimableDirFilter(String volume, String bucket, int index, Boolean expectedValue) throws IOException { List snapshotInfos = getLastSnapshotInfos(volume, bucket, 1, index); + assertEquals(snapshotInfos.size(), 1); SnapshotInfo prevSnapshotInfo = snapshotInfos.get(0); OmBucketInfo bucketInfo = getOzoneManager().getBucketInfo(volume, bucket); long volumeId = getOzoneManager().getMetadataManager().getVolumeId(volume); @@ -129,7 +130,7 @@ public void testNonReclaimableDirectory(int actualNumberOfSnapshots, int index) @ParameterizedTest @MethodSource("testReclaimableFilterArguments") - public void testReclaimableKeyWithDifferentObjId(int actualNumberOfSnapshots, int index) + public void testReclaimableDirectoryWithDifferentObjId(int actualNumberOfSnapshots, int index) throws IOException, RocksDBException { setup(1, actualNumberOfSnapshots, index, 4, 2); String volume = getVolumes().get(3);