From 897dd318063a4fdf32278225acbf7259ed6d85d3 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Thu, 5 Jun 2025 22:28:47 +0530 Subject: [PATCH 01/15] HDDS-13180. Add replicatedSizeOfFiles to NSSummary --- .../recon/TestReconAndAdminContainerCLI.java | 2 +- ...estReconInsightsForDeletedDirectories.java | 17 +++- .../hadoop/fs/ozone/TestLeaseRecovery.java | 2 +- .../TestOzoneFSWithObjectStoreCreate.java | 2 +- .../TestOzoneFileSystemPrefixParser.java | 2 +- .../org/apache/hadoop/ozone/TestDataUtil.java | 25 +++-- .../ozone/om/TestObjectStoreWithLegacyFS.java | 2 +- .../apache/hadoop/ozone/om/TestOmMetrics.java | 4 +- ...napshotDeletingServiceIntegrationTest.java | 2 +- .../ozone/om/snapshot/TestOmSnapshot.java | 2 +- .../ozone/shell/TestOzoneDebugShell.java | 2 +- .../ozone/recon/api/OMDBInsightEndpoint.java | 19 ++-- .../ozone/recon/api/types/NSSummary.java | 13 ++- .../ozone/recon/codec/NSSummaryCodec.java | 5 +- .../tasks/NSSummaryTaskDbEventHandler.java | 2 + .../recon/api/TestOmDBInsightEndPoint.java | 93 +++++++++++++++++-- .../TestReconNamespaceSummaryManagerImpl.java | 6 +- 17 files changed, 159 insertions(+), 41 deletions(-) diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java index 70be91b78624..cf742076ca9d 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java @@ -167,7 +167,7 @@ static void init() throws Exception { String bucketName = "bucket1"; ozoneBucket = TestDataUtil.createVolumeAndBucket( - client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED); + client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED, null); String keyNameR3 = "key1"; containerIdR3 = setupRatisKey(recon, keyNameR3, diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 487bd116d9a4..aa0e053e5a51 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.recon; +import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -39,6 +41,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.IOUtils; @@ -52,6 +56,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.QuotaUtil; import org.apache.hadoop.ozone.recon.api.OMDBInsightEndpoint; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -80,6 +85,7 @@ public class TestReconInsightsForDeletedDirectories { private static FileSystem fs; private static String volumeName; private static String bucketName; + private static ReplicationConfig replicationConfig; private static OzoneClient client; private static ReconService recon; @@ -99,8 +105,9 @@ public static void init() throws Exception { client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, - BucketLayout.FILE_SYSTEM_OPTIMIZED); + replicationConfig = ReplicationConfig.fromTypeAndFactor(RATIS, THREE); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); volumeName = bucket.getVolumeName(); bucketName = bucket.getName(); @@ -147,7 +154,6 @@ public void cleanup() throws IOException { @Test public void testGetDeletedDirectoryInfo() throws Exception { - // Create a directory structure with 10 files in dir1. Path dir1 = new Path("/dir1"); fs.mkdirs(dir1); @@ -210,6 +216,7 @@ public void testGetDeletedDirectoryInfo() // Assert that the directory dir1 has 10 sub-files and size of 10 bytes. assertEquals(10, summary.getNumOfFiles()); assertEquals(10, summary.getSizeOfFiles()); + assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), summary.getReplicatedSizeOfFiles()); } // Delete the entire directory dir1. @@ -237,6 +244,7 @@ public void testGetDeletedDirectoryInfo() (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 10. assertEquals(10, entity.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(10, replicationConfig), entity.getReplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -257,7 +265,6 @@ public void testGetDeletedDirectoryInfo() @Test public void testGetDeletedDirectoryInfoForNestedDirectories() throws Exception { - // Create a directory structure with 10 files and 3 nested directories. Path path = new Path("/dir1/dir2/dir3"); fs.mkdirs(path); @@ -326,6 +333,7 @@ public void testGetDeletedDirectoryInfoForNestedDirectories() (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 3. assertEquals(3, entity.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(3, replicationConfig), entity.getReplicatedDataSize()); // Cleanup the tables. cleanupTables(); @@ -388,6 +396,7 @@ public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() (KeyInsightInfoResponse) deletedDirInfo.getEntity(); // Assert the size of deleted directory is 100. assertEquals(100, entity.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(100, replicationConfig), entity.getReplicatedDataSize()); // Cleanup the tables.
cleanupTables(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index eaf98317c789..230b39d5e40d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -265,7 +265,7 @@ public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception { @Test public void testOBSRecoveryShouldFail() throws Exception { OzoneBucket obsBucket = TestDataUtil.createVolumeAndBucket(client, - "vol2", "obs", BucketLayout.OBJECT_STORE); + "vol2", "obs", BucketLayout.OBJECT_STORE, null); String obsDir = OZONE_ROOT + obsBucket.getVolumeName() + OZONE_URI_DELIMITER + obsBucket.getName(); Path obsFile = new Path(obsDir, "file" + getTestName() + FILE_COUNTER.incrementAndGet()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 012c7a600722..2cea4fce55b6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -104,7 +104,7 @@ public void init() throws Exception { bucketName = RandomStringUtils.secure().nextAlphabetic(10).toLowerCase(); // create a volume and a bucket to be used by OzoneFileSystem - TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null); String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName, volumeName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java index facea4409650..cd022dd99b4d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java @@ -71,7 +71,7 @@ public static void init() throws Exception { // create a volume and a bucket to be used by OzoneFileSystem try (OzoneClient client = cluster.newClient()) { TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, - BucketLayout.FILE_SYSTEM_OPTIMIZED); + BucketLayout.FILE_SYSTEM_OPTIMIZED, null); } String rootPath = String diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index a30fc356057d..90fcfa8e2d6b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -33,6 +33,7 @@ import java.util.Scanner; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import 
org.apache.hadoop.hdds.client.ReplicationType; @@ -61,11 +62,11 @@ private TestDataUtil() { public static OzoneBucket createVolumeAndBucket(OzoneClient client, String volumeName, String bucketName) throws IOException { - return createVolumeAndBucket(client, volumeName, bucketName, getDefaultBucketLayout(client)); + return createVolumeAndBucket(client, volumeName, bucketName, getDefaultBucketLayout(client), null); } public static OzoneBucket createVolumeAndBucket(OzoneClient client, - String volumeName, String bucketName, BucketLayout bucketLayout) + String volumeName, String bucketName, BucketLayout bucketLayout, DefaultReplicationConfig replicationConfig) throws IOException { BucketArgs omBucketArgs; BucketArgs.Builder builder = BucketArgs.newBuilder(); @@ -73,6 +74,10 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client, if (bucketLayout != null) { builder.setBucketLayout(bucketLayout); } + + if (replicationConfig != null) { + builder.setDefaultReplicationConfig(replicationConfig); + } omBucketArgs = builder.build(); return createVolumeAndBucket(client, volumeName, bucketName, @@ -197,18 +202,26 @@ public static OzoneBucket createLinkedBucket(OzoneClient client, String vol, Str public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout) throws IOException { - return createVolumeAndBucket(client, bucketLayout, false); + return createVolumeAndBucket(client, bucketLayout, null, false); } - public static OzoneBucket createVolumeAndBucket(OzoneClient client, - BucketLayout bucketLayout, boolean createLinkedBucket) throws IOException { + public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout, + DefaultReplicationConfig replicationConfig) + throws IOException { + return createVolumeAndBucket(client, bucketLayout, replicationConfig, false); + } + + public static OzoneBucket createVolumeAndBucket(OzoneClient client, BucketLayout bucketLayout, + DefaultReplicationConfig replicationConfig, + boolean createLinkedBucket) + throws IOException { final int attempts = 5; for (int i = 0; i < attempts; i++) { try { String volumeName = "volume" + RandomStringUtils.secure().nextNumeric(5); String bucketName = "bucket" + RandomStringUtils.secure().nextNumeric(5); OzoneBucket ozoneBucket = createVolumeAndBucket(client, volumeName, bucketName, - bucketLayout); + bucketLayout, replicationConfig); if (createLinkedBucket) { String targetBucketName = ozoneBucket.getName() + RandomStringUtils.secure().nextNumeric(5); ozoneBucket = createLinkedBucket(client, volumeName, bucketName, targetBucketName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index 71a3ac2af7b7..cc787ca36e94 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -101,7 +101,7 @@ public void init() throws Exception { // create a volume and a bucket to be used by OzoneFileSystem TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, - BucketLayout.OBJECT_STORE); + BucketLayout.OBJECT_STORE, null); volume = client.getObjectStore().getVolume(volumeName); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 8edece39908d..b5a7576fdf1a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -357,7 +357,7 @@ public void testKeyOps() throws Exception { long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics); // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout - TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); doKeyOps(keyArgs); // This will perform 7 different operations on the key @@ -487,7 +487,7 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { String bucketName = UUID.randomUUID().toString(); // create bucket with different layout in each ParameterizedTest - TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout); + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout, null); // Create bucket with 2 nested directories. String rootPath = String.format("%s://%s/", diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java index f4c83fc08a5f..8db42e8bea2c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java @@ -147,7 +147,7 @@ public void setup() throws Exception { client = cluster.newClient(); om = cluster.getOzoneManager(); bucket1 = TestDataUtil.createVolumeAndBucket( - client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT); + client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT, null); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 366f61990f4c..69441f580d75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -238,7 +238,7 @@ private void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem - ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBucket); + ozoneBucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBucket); if (createLinkedBucket) { this.linkedBuckets.put(ozoneBucket.getName(), ozoneBucket.getSourceBucket()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index 14753394cfe3..274f83123f89 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -188,7 +188,7 @@ private void writeKey(String volumeName, String bucketName, ReplicationFactor.THREE); } TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, - layout); + layout, null); TestDataUtil.createKey( client.getObjectStore().getVolume(volumeName).getBucket(bucketName), keyName, repConfig, "test".getBytes(StandardCharsets.UTF_8)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 048ab0ebc4ef..4a370083b8ef 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -58,6 +58,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -646,9 +647,9 @@ private void getPendingForDeletionDirInfo( keyEntityInfo.setKey(omKeyInfo.getFileName()); keyEntityInfo.setPath(createPath(omKeyInfo)); keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); - keyEntityInfo.setSize( - fetchSizeForDeletedDirectory(omKeyInfo.getObjectID())); - keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); + Pair<Long, Long> sizeInfo = fetchSizeForDeletedDirectory(omKeyInfo.getObjectID()); + keyEntityInfo.setSize(sizeInfo.getLeft()); + keyEntityInfo.setReplicatedSize(sizeInfo.getRight()); keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); pendingForDeletionKeyInfo.setUnreplicatedDataSize( pendingForDeletionKeyInfo.getUnreplicatedDataSize() + @@ -681,17 +682,21 @@ private void getPendingForDeletionDirInfo( * @return total used data size in bytes * @throws IOException ioEx */ - protected long fetchSizeForDeletedDirectory(long objectId) + protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId) throws IOException { NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId); if (nsSummary == null) { - return 0L; + return Pair.of(0L, 0L); } long totalSize = nsSummary.getSizeOfFiles(); + long totalReplicatedSize = nsSummary.getReplicatedSizeOfFiles(); + for (long childId : nsSummary.getChildDir()) { - totalSize += fetchSizeForDeletedDirectory(childId); + Pair<Long, Long> childSize = fetchSizeForDeletedDirectory(childId); + totalSize += childSize.getLeft(); + totalReplicatedSize += childSize.getRight(); } - return totalSize; + return Pair.of(totalSize, totalReplicatedSize); } /** This method retrieves set of directories pending for deletion.
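The recursive aggregation introduced above in fetchSizeForDeletedDirectory is the core of this patch: each NSSummary row stores only the sizes of its direct files, so a deleted directory's totals are obtained by folding its own (size, replicated size) pair together with the pairs returned for each child directory. Below is a minimal, self-contained sketch of that walk, for illustration only and not part of the patch: DirNode is a hypothetical stand-in for an NSSummary row, and a long[2] stands in for the commons-lang3 Pair to keep the example dependency-free.

import java.util.ArrayList;
import java.util.List;

public final class SubtreeSizeSketch {

  // Hypothetical stand-in for an NSSummary row: the sizes of the
  // directory's direct files plus references to its child directories.
  static final class DirNode {
    private final long sizeOfFiles;
    private final long replicatedSizeOfFiles;
    private final List<DirNode> childDirs = new ArrayList<>();

    DirNode(long sizeOfFiles, long replicatedSizeOfFiles) {
      this.sizeOfFiles = sizeOfFiles;
      this.replicatedSizeOfFiles = replicatedSizeOfFiles;
    }
  }

  // Mirrors fetchSizeForDeletedDirectory: start from the node's own sizes,
  // then add each child's subtree totals. Index 0 holds the unreplicated
  // total, index 1 the replicated total.
  static long[] subtreeSizes(DirNode node) {
    long total = node.sizeOfFiles;
    long totalReplicated = node.replicatedSizeOfFiles;
    for (DirNode child : node.childDirs) {
      long[] childTotals = subtreeSizes(child);
      total += childTotals[0];
      totalReplicated += childTotals[1];
    }
    return new long[] {total, totalReplicated};
  }

  public static void main(String[] args) {
    // dir1 holds 10 bytes directly (30 bytes at RATIS/THREE) and one
    // child directory holding 5 bytes (15 bytes replicated).
    DirNode child = new DirNode(5L, 15L);
    DirNode dir1 = new DirNode(10L, 30L);
    dir1.childDirs.add(child);
    long[] totals = subtreeSizes(dir1);
    // Prints: unreplicated=15 replicated=45
    System.out.println("unreplicated=" + totals[0] + " replicated=" + totals[1]);
  }
}

The Pair returned by the real method plays exactly this role, which is why a missing NSSummary row maps to Pair.of(0L, 0L): an absent subtree contributes nothing to either total.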
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java index f20fdc764af5..24b43716a93e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/NSSummary.java @@ -31,24 +31,27 @@ public class NSSummary { private int numOfFiles; private long sizeOfFiles; + private long replicatedSizeOfFiles; private int[] fileSizeBucket; private Set<Long> childDir; private String dirName; private long parentId = 0; public NSSummary() { - this(0, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], + this(0, 0L, 0L, new int[ReconConstants.NUM_OF_FILE_SIZE_BINS], new HashSet<>(), "", 0); } public NSSummary(int numOfFiles, long sizeOfFiles, + long replicatedSizeOfFiles, int[] bucket, Set<Long> childDir, String dirName, long parentId) { this.numOfFiles = numOfFiles; this.sizeOfFiles = sizeOfFiles; + this.replicatedSizeOfFiles = replicatedSizeOfFiles; setFileSizeBucket(bucket); this.childDir = childDir; this.dirName = dirName; @@ -63,6 +66,10 @@ public long getSizeOfFiles() { return sizeOfFiles; } + public long getReplicatedSizeOfFiles() { + return replicatedSizeOfFiles; + } + public int[] getFileSizeBucket() { return Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS); } @@ -83,6 +90,10 @@ public void setSizeOfFiles(long sizeOfFiles) { this.sizeOfFiles = sizeOfFiles; } + public void setReplicatedSizeOfFiles(long replicatedSizeOfFiles) { + this.replicatedSizeOfFiles = replicatedSizeOfFiles; + } + public void setFileSizeBucket(int[] fileSizeBucket) { this.fileSizeBucket = Arrays.copyOf(fileSizeBucket, ReconConstants.NUM_OF_FILE_SIZE_BINS); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index 92068988d76e..d1967a35f771 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -67,11 +67,12 @@ public byte[] toPersistedFormatImpl(NSSummary object) throws IOException { + (numOfChildDirs + 1) * Long.BYTES // 1 long field for parentId + list size + Short.BYTES // 2 dummy shorts to track length + dirName.length // directory name length - + Long.BYTES; // Added space for parentId serialization + + 2 * Long.BYTES; // Added space for parentId serialization and replicated size of files ByteArrayOutputStream out = new ByteArrayOutputStream(resSize); out.write(integerCodec.toPersistedFormat(object.getNumOfFiles())); out.write(longCodec.toPersistedFormat(object.getSizeOfFiles())); + out.write(longCodec.toPersistedFormat(object.getReplicatedSizeOfFiles())); out.write(shortCodec.toPersistedFormat( (short) ReconConstants.NUM_OF_FILE_SIZE_BINS)); int[] fileSizeBucket = object.getFileSizeBucket(); @@ -95,6 +96,7 @@ public NSSummary fromPersistedFormatImpl(byte[] rawData) throws IOException { NSSummary res = new NSSummary(); res.setNumOfFiles(in.readInt()); res.setSizeOfFiles(in.readLong()); + res.setReplicatedSizeOfFiles(in.readLong()); short len = in.readShort(); assert (len == (short) ReconConstants.NUM_OF_FILE_SIZE_BINS); int[] fileSizeBucket = new int[len]; @@ -136,6 +138,7 @@ public NSSummary copyObject(NSSummary object) { NSSummary copy = new NSSummary();
copy.setNumOfFiles(object.getNumOfFiles()); copy.setSizeOfFiles(object.getSizeOfFiles()); + copy.setReplicatedSizeOfFiles(object.getReplicatedSizeOfFiles()); copy.setFileSizeBucket(object.getFileSizeBucket()); copy.setChildDir(object.getChildDir()); copy.setDirName(object.getDirName()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index 755d966b8328..85a926df4a18 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -91,6 +91,7 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map generatedIds = new HashSet<>(); + public static Collection<Object[]> replicationConfigValues() { + return Arrays.asList(new Object[][]{ + {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE)}, + {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(10, 4, ECReplicationConfig.EcCodec.XOR, 4096))} + }); + } + + public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec, + int ecChunkSize) { + return HddsProtos.ECReplicationConfig.newBuilder() + .setData(data) + .setParity(parity) + .setCodec(codec.toString()) + .setEcChunkSize(ecChunkSize) + .build(); + } + private static final String VOLUME_ONE = "volume1"; private static final String OBS_BUCKET = "obs-bucket"; @@ -318,6 +349,26 @@ public void setUp() throws Exception { nsSummaryTaskWithFSO.reprocessWithFSO(reconOMMetadataManager); } + /** + * Releases resources (network sockets, database files) after each test run. + * This is critical to prevent resource leaks between tests, which would otherwise cause "Too many open files" errors.
+ */ + @AfterEach + public void tearDown() throws Exception { + + if (ozoneStorageContainerManager != null) { + ozoneStorageContainerManager.stop(); + } + + if (reconOMMetadataManager != null) { + reconOMMetadataManager.stop(); + } + + if (omMetadataManager != null) { + omMetadataManager.stop(); + } + } + @SuppressWarnings("methodlength") private void setUpOmData() throws Exception { List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>(); @@ -1403,6 +1454,19 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .build(); } + private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, + String keyName, boolean isFile, ReplicationConfig replicationConfig) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFile(isFile) + .setObjectID(generateUniqueRandomLong()) + .setReplicationConfig(replicationConfig) + .setDataSize(random.nextLong()) + .build(); + } + @Test public void testGetDeletedDirInfoLimitParam() throws Exception { OmKeyInfo omKeyInfo1 = @@ -1503,15 +1567,17 @@ public void testGetDeletedDirInfo() throws Exception { keyInsightInfoResp.getLastKey()); } - @Test - public void testGetDirectorySizeInfo() throws Exception { + @ParameterizedTest + @MethodSource("replicationConfigValues") + public void testGetDirectorySizeInfo(ReplicationConfig replicationConfig) throws Exception { OmKeyInfo omKeyInfo1 = - getOmKeyInfo("sampleVol", "bucketOne", "dir1", false); + getOmKeyInfo("sampleVol", "bucketOne", "dir1", false, replicationConfig); OmKeyInfo omKeyInfo2 = - getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false); + getOmKeyInfo("sampleVol", "bucketTwo", "dir2", false, replicationConfig); OmKeyInfo omKeyInfo3 = - getOmKeyInfo("sampleVol", "bucketThree", "dir3", false); + getOmKeyInfo("sampleVol", "bucketThree", "dir3", false, + replicationConfig); // Add 3 entries to deleted dir table for directory dir1, dir2 and dir3 // having object id 1, 2 and 3 respectively @@ -1525,11 +1591,11 @@ public void testGetDirectorySizeInfo() throws Exception { // Prepare NS summary data and populate the table Table<Long, NSSummary> table = omdbInsightEndpoint.getNsSummaryTable(); // Set size of files to 5 for directory object id 1 - table.put(omKeyInfo1.getObjectID(), getNsSummary(5L)); + table.put(omKeyInfo1.getObjectID(), getNsSummary(5L, replicationConfig)); // Set size of files to 6 for directory object id 2 - table.put(omKeyInfo2.getObjectID(), getNsSummary(6L)); + table.put(omKeyInfo2.getObjectID(), getNsSummary(6L, replicationConfig)); // Set size of files to 7 for directory object id 3 - table.put(omKeyInfo3.getObjectID(), getNsSummary(7L)); + table.put(omKeyInfo3.getObjectID(), getNsSummary(7L, replicationConfig)); Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, ""); KeyInsightInfoResponse keyInsightInfoResp = @@ -1540,15 +1606,23 @@ public void testGetDirectorySizeInfo() throws Exception { // Assert the total size under directory dir1 is 5L assertEquals(5L, keyInsightInfoResp.getDeletedDirInfoList().get(0).getSize()); + assertEquals(QuotaUtil.getReplicatedSize(5L, replicationConfig), + keyInsightInfoResp.getDeletedDirInfoList().get(0).getReplicatedSize()); // Assert the total size under directory dir2 is 6L assertEquals(6L, keyInsightInfoResp.getDeletedDirInfoList().get(1).getSize()); + assertEquals(QuotaUtil.getReplicatedSize(6L, replicationConfig), + keyInsightInfoResp.getDeletedDirInfoList().get(1).getReplicatedSize()); // Assert the total size under directory dir3 is 7L assertEquals(7L,
keyInsightInfoResp.getDeletedDirInfoList().get(2).getSize()); + assertEquals(QuotaUtil.getReplicatedSize(7L, replicationConfig), + keyInsightInfoResp.getDeletedDirInfoList().get(2).getReplicatedSize()); // Assert the total of all the deleted directories is 18L assertEquals(18L, keyInsightInfoResp.getUnreplicatedDataSize()); + assertEquals(QuotaUtil.getReplicatedSize(18L, replicationConfig), + keyInsightInfoResp.getReplicatedDataSize()); } @Test @@ -2014,9 +2088,10 @@ public void testListKeysLegacyBucketWithFSEnabledAndPagination() { assertEquals("", listKeysResponse.getLastKey()); } - private NSSummary getNsSummary(long size) { + private NSSummary getNsSummary(long size, ReplicationConfig replicationConfig) { NSSummary summary = new NSSummary(); summary.setSizeOfFiles(size); + summary.setReplicatedSizeOfFiles(QuotaUtil.getReplicatedSize(size, replicationConfig)); return summary; } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java index c0931ba6d35d..e33bee042560 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconNamespaceSummaryManagerImpl.java @@ -112,9 +112,9 @@ public void testInitNSSummaryTable() throws IOException { private void putThreeNSMetadata() throws IOException { HashMap<Long, NSSummary> hmap = new HashMap<>(); - hmap.put(1L, new NSSummary(1, 2, testBucket, TEST_CHILD_DIR, "dir1", -1)); - hmap.put(2L, new NSSummary(3, 4, testBucket, TEST_CHILD_DIR, "dir2", -1)); - hmap.put(3L, new NSSummary(5, 6, testBucket, TEST_CHILD_DIR, "dir3", -1)); + hmap.put(1L, new NSSummary(1, 2, 2 * 3, testBucket, TEST_CHILD_DIR, "dir1", -1)); + hmap.put(2L, new NSSummary(3, 4, 4 * 3, testBucket, TEST_CHILD_DIR, "dir2", -1)); + hmap.put(3L, new NSSummary(5, 6, 6 * 3, testBucket, TEST_CHILD_DIR, "dir3", -1)); RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); for (Map.Entry<Long, NSSummary> entry: hmap.entrySet()) { reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, From 853ebc5e2d7c6be621575175e239fa5c376deee4 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Thu, 5 Jun 2025 22:35:24 +0530 Subject: [PATCH 02/15] Reduce duplication in TestOmDBInsightEndPoint#getOmKeyInfo --- .../recon/api/TestOmDBInsightEndPoint.java | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index ed1cd231c145..f574a1d4bf6f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -1442,20 +1442,17 @@ public void testGetDeletedKeysWithBothPrevKeyAndStartPrefixProvided() private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName, boolean isFile) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFile(isFile) - .setObjectID(generateUniqueRandomLong()) - .setReplicationConfig(StandaloneReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.ONE)) - .setDataSize(random.nextLong()) - .build(); + return
buildOmKeyInfo(volumeName, bucketName, keyName, isFile, + StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName, boolean isFile, ReplicationConfig replicationConfig) { + return buildOmKeyInfo(volumeName, bucketName, keyName, isFile, replicationConfig); + } + + private OmKeyInfo buildOmKeyInfo(String volumeName, String bucketName, + String keyName, boolean isFile, ReplicationConfig replicationConfig) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) From 960ced286ac551ead134dbe4b354777d55ce0106 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Thu, 5 Jun 2025 22:47:27 +0530 Subject: [PATCH 03/15] Fix PMD failures --- .../recon/api/TestOmDBInsightEndPoint.java | 50 +++++++++---------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index f574a1d4bf6f..5721038b2496 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -119,33 +119,7 @@ public class TestOmDBInsightEndPoint extends AbstractReconSqlDBTest { private Random random = new Random(); private OzoneConfiguration ozoneConfiguration; private Set<Long> generatedIds = new HashSet<>(); - - public static Collection<Object[]> replicationConfigValues() { - return Arrays.asList(new Object[][]{ - {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE)}, - {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)}, - {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, - toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))}, - {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, - toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))}, - {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, - toProto(10, 4, ECReplicationConfig.EcCodec.XOR, 4096))} - }); - } - - public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec, - int ecChunkSize) { - return HddsProtos.ECReplicationConfig.newBuilder() - .setData(data) - .setParity(parity) - .setCodec(codec.toString()) - .setEcChunkSize(ecChunkSize) - .build(); - } - private static final String VOLUME_ONE = "volume1"; - private static final String OBS_BUCKET = "obs-bucket"; private static final String FSO_BUCKET = "fso-bucket"; private static final String EMPTY_OBS_BUCKET = "empty-obs-bucket"; @@ -287,6 +261,30 @@ public TestOmDBInsightEndPoint() { super(); } + public static Collection<Object[]> replicationConfigValues() { + return Arrays.asList(new Object[][]{ + {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE)}, + {ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE)}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(3, 2, ECReplicationConfig.EcCodec.RS, 1024))}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(6, 3, ECReplicationConfig.EcCodec.RS, 1024))}, + {ReplicationConfig.fromProto(HddsProtos.ReplicationType.EC, null, + toProto(10, 4,
ECReplicationConfig.EcCodec.XOR, 4096))} + }); + } + + public static HddsProtos.ECReplicationConfig toProto(int data, int parity, ECReplicationConfig.EcCodec codec, + int ecChunkSize) { + return HddsProtos.ECReplicationConfig.newBuilder() + .setData(data) + .setParity(parity) + .setCodec(codec.toString()) + .setEcChunkSize(ecChunkSize) + .build(); + } + private long generateUniqueRandomLong() { long newValue; do { From d90890a590fafd5b2f0ba8e130097f7b63116e87 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Thu, 5 Jun 2025 23:23:58 +0530 Subject: [PATCH 04/15] Add integration test to verify EC ReplicationConfig --- ...estReconInsightsForDeletedDirectories.java | 14 ++-- ...tReconInsightsForDeletedDirectoriesEC.java | 73 +++++++++++++++++++ 2 files changed, 80 insertions(+), 7 deletions(-) create mode 100644 hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index aa0e053e5a51..46bfa2622074 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -81,13 +81,13 @@ public class TestReconInsightsForDeletedDirectories { private static final Logger LOG = LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); - private static MiniOzoneCluster cluster; - private static FileSystem fs; - private static String volumeName; - private static String bucketName; - private static ReplicationConfig replicationConfig; - private static OzoneClient client; - private static ReconService recon; + protected static MiniOzoneCluster cluster; + protected static FileSystem fs; + protected static String volumeName; + protected static String bucketName; + protected static ReplicationConfig replicationConfig; + protected static OzoneClient client; + protected static ReconService recon; @BeforeAll public static void init() throws Exception { diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java new file mode 100644 index 000000000000..c08f9dec8377 --- /dev/null +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.junit.jupiter.api.BeforeAll; + +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; + +/** + * Test class to verify the correctness of the insights generated by Recon + * for Deleted Directories for EC ReplicationConfig. + */ +public class TestReconInsightsForDeletedDirectoriesEC + extends TestReconInsightsForDeletedDirectories { + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, + TimeUnit.MILLISECONDS); + conf.setBoolean(OZONE_ACL_ENABLED, true); + recon = new ReconService(conf); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5) + .addService(recon) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + replicationConfig = new ECReplicationConfig("RS-3-2-1024k"); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + volumeName = bucket.getVolumeName(); + bucketName = bucket.getName(); + + String rootPath = String.format("%s://%s.%s/", + OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); + + fs = FileSystem.get(conf); + } +} From 74c20757873a5eb04548d9230c59b783b579b997 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Fri, 6 Jun 2025 00:20:33 +0530 Subject: [PATCH 05/15] Fix checkstyle --- ...estReconInsightsForDeletedDirectories.java | 70 +++++++++++++++++-- ...tReconInsightsForDeletedDirectoriesEC.java | 40 +++++------ 2 files changed, 83 insertions(+), 27 deletions(-) diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 46bfa2622074..487e3e864c8e 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -81,13 +81,69 @@ public class TestReconInsightsForDeletedDirectories { private static final Logger LOG = LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); - protected 
static MiniOzoneCluster cluster; - protected static FileSystem fs; - protected static String volumeName; - protected static String bucketName; - protected static ReplicationConfig replicationConfig; - protected static OzoneClient client; - protected static ReconService recon; + private static MiniOzoneCluster cluster; + private static FileSystem fs; + private static String volumeName; + private static String bucketName; + private static ReplicationConfig replicationConfig; + private static OzoneClient client; + private static ReconService recon; + + protected static MiniOzoneCluster getCluster() { + return cluster; + } + + protected static void setCluster(MiniOzoneCluster cluster) { + TestReconInsightsForDeletedDirectories.cluster = cluster; + } + + protected static FileSystem getFs() { + return fs; + } + + protected static void setFs(FileSystem fs) { + TestReconInsightsForDeletedDirectories.fs = fs; + } + + protected static String getVolumeName() { + return volumeName; + } + + protected static void setVolumeName(String volumeName) { + TestReconInsightsForDeletedDirectories.volumeName = volumeName; + } + + protected static String getBucketName() { + return bucketName; + } + + protected static void setBucketName(String bucketName) { + TestReconInsightsForDeletedDirectories.bucketName = bucketName; + } + + protected static ReplicationConfig getReplicationConfig() { + return replicationConfig; + } + + protected static void setReplicationConfig(ReplicationConfig replicationConfig) { + TestReconInsightsForDeletedDirectories.replicationConfig = replicationConfig; + } + + protected static OzoneClient getClient() { + return client; + } + + protected static void setClient(OzoneClient client) { + TestReconInsightsForDeletedDirectories.client = client; + } + + protected static ReconService getRecon() { + return recon; + } + + protected static void setRecon(ReconService recon) { + TestReconInsightsForDeletedDirectories.recon = recon; + } @BeforeAll public static void init() throws Exception { diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java index c08f9dec8377..7b95dd6874ed 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java @@ -14,8 +14,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + package org.apache.hadoop.ozone.recon; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; + +import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; @@ -28,13 +35,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.BeforeAll; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; - /** * Test class to verify the correctness of the insights generated by Recon * for Deleted Directories for EC ReplicationConfig. @@ -49,25 +49,25 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); - recon = new ReconService(conf); - cluster = MiniOzoneCluster.newBuilder(conf) + setRecon(new ReconService(conf)); + setCluster(MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .addService(recon) - .build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - replicationConfig = new ECReplicationConfig("RS-3-2-1024k"); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, - new DefaultReplicationConfig(replicationConfig)); - volumeName = bucket.getVolumeName(); - bucketName = bucket.getName(); + .addService(getRecon()) + .build()); + getCluster().waitForClusterToBeReady(); + setClient(getCluster().newClient()); + setReplicationConfig(new ECReplicationConfig("RS-3-2-1024k")); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(getClient(), BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(getReplicationConfig())); + setVolumeName(bucket.getVolumeName()); + setBucketName(bucket.getName()); String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + OzoneConsts.OZONE_URI_SCHEME, getBucketName(), getVolumeName()); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); - fs = FileSystem.get(conf); + setFs(FileSystem.get(conf)); } } From 22c328155f1ea7ff3acea2ad6b8531c70e174e58 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Tue, 24 Jun 2025 15:26:41 +0530 Subject: [PATCH 06/15] Update Java doc for OMDBInsightEndpoint#fetchSizeForDeletedDirectory --- .../apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 4a370083b8ef..03b2fb141dbc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ 
-675,11 +675,11 @@ private void getPendingForDeletionDirInfo( } /** - * Given an object ID, return total data size (no replication) + * Given an object ID, return total data size as a pair of (total size, total replicated size) * under this object. Note:- This method is RECURSIVE. * * @param objectId the object's ID - * @return total used data size in bytes + * @return pair of total used data size and total replicated data size, in bytes * @throws IOException ioEx */ protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId) From ce31658e14af960657ee679fef7dbfd56cf4b428 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Fri, 27 Jun 2025 13:37:49 +0530 Subject: [PATCH 07/15] Introduce upgrade framework for ReplicatedSizeOfFilesUpgradeAction --- .../ReconGuiceServletContextListener.java | 4 ++ .../recon/upgrade/ReconLayoutFeature.java | 3 +- .../ReplicatedSizeOfFilesUpgradeAction.java | 61 +++++++++++++++++++ 3 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java index d58e2a38381f..e02971be6eb4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java @@ -33,6 +33,10 @@ public Injector getInjector() { return injector; } + public static Injector getStaticInjector() { + return injector; + } + static void setInjector(Injector inj) { injector = inj; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java index 2b4569d449ec..050e1b26d85e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java @@ -31,7 +31,8 @@ public enum ReconLayoutFeature { // Represents the starting point for Recon's layout versioning system. INITIAL_VERSION(0, "Recon Layout Versioning Introduction"), TASK_STATUS_STATISTICS(1, "Recon Task Status Statistics Tracking Introduced"), - UNHEALTHY_CONTAINER_REPLICA_MISMATCH(2, "Adding replica mismatch state to the unhealthy container table"); + UNHEALTHY_CONTAINER_REPLICA_MISMATCH(2, "Adding replica mismatch state to the unhealthy container table"), + REPLICATED_SIZE_OF_FILES(3, "Adds replicatedSizeOfFiles to NSSummary"); private final int version; private final String description; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java new file mode 100644 index 000000000000..3ecfac465f11 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReplicatedSizeOfFilesUpgradeAction.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import com.google.inject.Injector; +import javax.sql.DataSource; +import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Upgrade action for the REPLICATED_SIZE_OF_FILES layout feature. + * The action triggers a full rebuild of the NSSummary tree, ensuring that the new replicatedSizeOfFiles field is + * correctly populated for all objects. + */ +@UpgradeActionRecon(feature = ReconLayoutFeature.REPLICATED_SIZE_OF_FILES, + type = ReconUpgradeAction.UpgradeActionType.FINALIZE) +public class ReplicatedSizeOfFilesUpgradeAction implements ReconUpgradeAction { + + private static final Logger LOG = LoggerFactory.getLogger(ReplicatedSizeOfFilesUpgradeAction.class); + + @Override + public void execute(DataSource dataSource) { + try { + Injector injector = ReconGuiceServletContextListener.getStaticInjector(); + if (injector == null) { + throw new IllegalStateException("Guice injector is not initialized. Cannot perform NSSummary rebuild."); + } + ReconNamespaceSummaryManager nsSummaryManager = injector.getInstance(ReconNamespaceSummaryManager.class); + ReconOMMetadataManager omMetadataManager = injector.getInstance(ReconOMMetadataManager.class); + LOG.info("Starting full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES upgrade..."); + nsSummaryManager.rebuildNSSummaryTree(omMetadataManager); + LOG.info("Completed full rebuild of NSSummary for REPLICATED_SIZE_OF_FILES upgrade."); + } catch (Exception e) { + LOG.error("Error during NSSummary rebuild for REPLICATED_SIZE_OF_FILES upgrade.", e); + throw new RuntimeException("Failed to rebuild NSSummary during upgrade", e); + } + } + + @Override + public UpgradeActionType getType() { + return UpgradeActionType.FINALIZE; + } +} From 24702dcacc53ae4d3c521050c88a70ca320a9a6b Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Tue, 8 Jul 2025 14:12:31 +0530 Subject: [PATCH 08/15] Fix TestNSSummaryTaskWithFSO failures --- .../ozone/recon/tasks/TestNSSummaryTaskWithFSO.java | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java index 75fb468c5a98..aae0b5d061ab 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.recon.tasks; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import
static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; @@ -34,6 +35,7 @@ import java.util.List; import java.util.Set; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -483,16 +485,16 @@ void testProcessWithFSOFlushAfterThresholdAndFailureOfLastElement() Mockito.when(event4.getAction()).thenReturn(OMDBUpdateEvent.OMDBUpdateAction.PUT); OmKeyInfo keyInfo1 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(2).setKeyName("key1") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); OmKeyInfo keyInfo2 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); OmKeyInfo keyInfo3 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); OmKeyInfo keyInfo4 = new OmKeyInfo.Builder().setParentObjectID(1).setObjectID(3).setKeyName("key2") - .setBucketName("bucket1") + .setBucketName("bucket1").setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024).setVolumeName("volume1").build(); Mockito.when(event1.getValue()).thenReturn(keyInfo1); Mockito.when(event2.getValue()).thenReturn(keyInfo2); From dc6ae020e15ac0333c34676eaa119371438a9c38 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Tue, 15 Jul 2025 17:06:50 +0530 Subject: [PATCH 09/15] Add new TestDataUtil.createVolumeAndBucket --- .../recon/TestReconAndAdminContainerCLI.java | 2 +- .../hadoop/fs/ozone/TestLeaseRecovery.java | 2 +- .../TestOzoneFSWithObjectStoreCreate.java | 2 +- .../TestOzoneFileSystemPrefixParser.java | 2 +- .../org/apache/hadoop/ozone/TestDataUtil.java | 16 ++++++++++- .../ozone/om/TestObjectStoreWithLegacyFS.java | 2 +- .../apache/hadoop/ozone/om/TestOmMetrics.java | 4 +-- ...napshotDeletingServiceIntegrationTest.java | 2 +- .../ozone/shell/TestOzoneDebugShell.java | 2 +- .../ozone/recon/api/OMDBInsightEndpoint.java | 28 +++++++++++-------- 10 files changed, 41 insertions(+), 21 deletions(-) diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java index cf742076ca9d..70be91b78624 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java @@ -167,7 +167,7 @@ static void init() throws Exception { String bucketName = "bucket1"; ozoneBucket = TestDataUtil.createVolumeAndBucket( - client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED, null); + client, volumeName, bucketName, BucketLayout.FILE_SYSTEM_OPTIMIZED); String keyNameR3 = "key1"; containerIdR3 = 
setupRatisKey(recon, keyNameR3, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index 230b39d5e40d..eaf98317c789 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -265,7 +265,7 @@ public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception { @Test public void testOBSRecoveryShouldFail() throws Exception { OzoneBucket obsBucket = TestDataUtil.createVolumeAndBucket(client, - "vol2", "obs", BucketLayout.OBJECT_STORE, null); + "vol2", "obs", BucketLayout.OBJECT_STORE); String obsDir = OZONE_ROOT + obsBucket.getVolumeName() + OZONE_URI_DELIMITER + obsBucket.getName(); Path obsFile = new Path(obsDir, "file" + getTestName() + FILE_COUNTER.incrementAndGet()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 2cea4fce55b6..012c7a600722 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -104,7 +104,7 @@ public void init() throws Exception { bucketName = RandomStringUtils.secure().nextAlphabetic(10).toLowerCase(); // create a volume and a bucket to be used by OzoneFileSystem - TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null); + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName, volumeName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java index cd022dd99b4d..facea4409650 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java @@ -71,7 +71,7 @@ public static void init() throws Exception { // create a volume and a bucket to be used by OzoneFileSystem try (OzoneClient client = cluster.newClient()) { TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, - BucketLayout.FILE_SYSTEM_OPTIMIZED, null); + BucketLayout.FILE_SYSTEM_OPTIMIZED); } String rootPath = String diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index 90fcfa8e2d6b..7ac80ef40584 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -62,7 +62,21 @@ private TestDataUtil() { public static OzoneBucket createVolumeAndBucket(OzoneClient client, String volumeName, String bucketName) throws IOException { - return createVolumeAndBucket(client, volumeName, bucketName, getDefaultBucketLayout(client), null); + return createVolumeAndBucket(client, volumeName, 
bucketName, getDefaultBucketLayout(client)); + } + + public static OzoneBucket createVolumeAndBucket(OzoneClient client, + String volumeName, String bucketName, BucketLayout bucketLayout) throws IOException { + BucketArgs omBucketArgs; + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setStorageType(StorageType.DISK); + if (bucketLayout != null) { + builder.setBucketLayout(bucketLayout); + } + omBucketArgs = builder.build(); + + return createVolumeAndBucket(client, volumeName, bucketName, + omBucketArgs); } public static OzoneBucket createVolumeAndBucket(OzoneClient client, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index cc787ca36e94..71a3ac2af7b7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -101,7 +101,7 @@ public void init() throws Exception { // create a volume and a bucket to be used by OzoneFileSystem TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, - BucketLayout.OBJECT_STORE, null); + BucketLayout.OBJECT_STORE); volume = client.getObjectStore().getVolume(volumeName); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index b5a7576fdf1a..8edece39908d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -357,7 +357,7 @@ public void testKeyOps() throws Exception { long initialNumDeleteObjectTaggingFails = getLongCounter("NumDeleteObjectTaggingFails", omMetrics); // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout - TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY, null); + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); doKeyOps(keyArgs); // This will perform 7 different operations on the key @@ -487,7 +487,7 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { String bucketName = UUID.randomUUID().toString(); // create bucket with different layout in each ParameterizedTest - TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout, null); + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout); // Create bucket with 2 nested directories. 
String rootPath = String.format("%s://%s/", diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java index 8db42e8bea2c..f4c83fc08a5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java @@ -147,7 +147,7 @@ public void setup() throws Exception { client = cluster.newClient(); om = cluster.getOzoneManager(); bucket1 = TestDataUtil.createVolumeAndBucket( - client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT, null); + client, VOLUME_NAME, BUCKET_NAME_ONE, BucketLayout.DEFAULT); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index 274f83123f89..14753394cfe3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -188,7 +188,7 @@ private void writeKey(String volumeName, String bucketName, ReplicationFactor.THREE); } TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, - layout, null); + layout); TestDataUtil.createKey( client.getObjectStore().getVolume(volumeName).getBucket(bucketName), keyName, repConfig, "test".getBytes(StandardCharsets.UTF_8)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 03b2fb141dbc..840c14b12ace 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -42,8 +42,10 @@ import com.google.common.annotations.VisibleForTesting; import java.io.IOException; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; +import java.util.Deque; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -684,17 +686,21 @@ private void getPendingForDeletionDirInfo( */ protected Pair<Long, Long> fetchSizeForDeletedDirectory(long objectId) throws IOException { - NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(objectId); - if (nsSummary == null) { - return Pair.of(0L, 0L); - } - long totalSize = nsSummary.getSizeOfFiles(); - long totalReplicatedSize = nsSummary.getReplicatedSizeOfFiles(); - - for (long childId : nsSummary.getChildDir()) { - Pair<Long, Long> childSize = fetchSizeForDeletedDirectory(childId); - totalSize += childSize.getLeft(); - totalReplicatedSize += childSize.getRight(); + long totalSize = 0; + long totalReplicatedSize = 0; + Deque<Long> stack = new ArrayDeque<>(); + stack.push(objectId); + + while (!stack.isEmpty()) { + long currentId = stack.pop(); + NSSummary nsSummary = reconNamespaceSummaryManager.getNSSummary(currentId); + if (nsSummary != null) { + totalSize += nsSummary.getSizeOfFiles(); + totalReplicatedSize += nsSummary.getReplicatedSizeOfFiles(); + for (long childId : nsSummary.getChildDir()) { + stack.push(childId); + } + } } return 
Pair.of(totalSize, totalReplicatedSize); } From 0763ff3955bbf61452692fd96e9a6e2dad0c9259 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Wed, 16 Jul 2025 13:55:21 +0530 Subject: [PATCH 10/15] Fix compile failures --- .../hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 7db6c8d41db6..fca8b137b720 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -132,7 +132,7 @@ public void setupFsClient() throws IOException { writeClient = objectStore.getClientProxy().getOzoneManagerClient(); ozoneManager = cluster().getOzoneManager(); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, createLinkedBuckets); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, bucketLayout, null, createLinkedBuckets); if (createLinkedBuckets) { linkedBucketMaps.put(bucket.getName(), bucket.getSourceBucket()); } From 367ac09dc7bcdbbb8bd5954a20c81b2794a08db8 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Mon, 21 Jul 2025 11:23:50 +0530 Subject: [PATCH 11/15] Remove getters and setters in parent test --- ...estReconInsightsForDeletedDirectories.java | 70 ++----------------- ...tReconInsightsForDeletedDirectoriesEC.java | 26 +++---- 2 files changed, 20 insertions(+), 76 deletions(-) diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 487e3e864c8e..46bfa2622074 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -81,69 +81,13 @@ public class TestReconInsightsForDeletedDirectories { private static final Logger LOG = LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); - private static MiniOzoneCluster cluster; - private static FileSystem fs; - private static String volumeName; - private static String bucketName; - private static ReplicationConfig replicationConfig; - private static OzoneClient client; - private static ReconService recon; - - protected static MiniOzoneCluster getCluster() { - return cluster; - } - - protected static void setCluster(MiniOzoneCluster cluster) { - TestReconInsightsForDeletedDirectories.cluster = cluster; - } - - protected static FileSystem getFs() { - return fs; - } - - protected static void setFs(FileSystem fs) { - TestReconInsightsForDeletedDirectories.fs = fs; - } - - protected static String getVolumeName() { - return volumeName; - } - - protected static void setVolumeName(String volumeName) { - TestReconInsightsForDeletedDirectories.volumeName = volumeName; - } - - protected static String getBucketName() { - return bucketName; - } - - protected static void setBucketName(String bucketName) { - TestReconInsightsForDeletedDirectories.bucketName = bucketName; - } - - protected static ReplicationConfig getReplicationConfig() 
{ - return replicationConfig; - } - - protected static void setReplicationConfig(ReplicationConfig replicationConfig) { - TestReconInsightsForDeletedDirectories.replicationConfig = replicationConfig; - } - - protected static OzoneClient getClient() { - return client; - } - - protected static void setClient(OzoneClient client) { - TestReconInsightsForDeletedDirectories.client = client; - } - - protected static ReconService getRecon() { - return recon; - } - - protected static void setRecon(ReconService recon) { - TestReconInsightsForDeletedDirectories.recon = recon; - } + protected static MiniOzoneCluster cluster; + protected static FileSystem fs; + protected static String volumeName; + protected static String bucketName; + protected static ReplicationConfig replicationConfig; + protected static OzoneClient client; + protected static ReconService recon; @BeforeAll public static void init() throws Exception { diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java index 7b95dd6874ed..544a9c7a4f71 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java @@ -49,25 +49,25 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); - setRecon(new ReconService(conf)); - setCluster(MiniOzoneCluster.newBuilder(conf) + recon = new ReconService(conf); + cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .addService(getRecon()) - .build()); - getCluster().waitForClusterToBeReady(); - setClient(getCluster().newClient()); - setReplicationConfig(new ECReplicationConfig("RS-3-2-1024k")); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(getClient(), BucketLayout.FILE_SYSTEM_OPTIMIZED, - new DefaultReplicationConfig(getReplicationConfig())); - setVolumeName(bucket.getVolumeName()); - setBucketName(bucket.getName()); + .addService(recon) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + replicationConfig = new ECReplicationConfig("RS-3-2-1024k"); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + volumeName = bucket.getVolumeName(); + bucketName = bucket.getName(); String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, getBucketName(), getVolumeName()); + OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); - setFs(FileSystem.get(conf)); + fs = FileSystem.get(conf); } } From db5ad703afe9e165ec52dac0b2eb781ed33f7a37 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Mon, 21 Jul 2025 11:47:46 +0530 Subject: [PATCH 12/15] Add more tests --- ...estReplicatedSizeOfFilesUpgradeAction.java | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java new file mode 100644 index 000000000000..46af00e9c23a --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/upgrade/TestReplicatedSizeOfFilesUpgradeAction.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.inject.Injector; +import javax.sql.DataSource; +import org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; + +/** + * Test class for ReplicatedSizeOfFilesUpgradeAction. + */ +@ExtendWith(MockitoExtension.class) +public class TestReplicatedSizeOfFilesUpgradeAction { + + private ReplicatedSizeOfFilesUpgradeAction upgradeAction; + @Mock + private DataSource mockDataSource; + @Mock + private Injector mockInjector; + @Mock + private ReconNamespaceSummaryManager mockNsSummaryManager; + @Mock + private ReconOMMetadataManager mockOmMetadataManager; + + @BeforeEach + public void setUp() { + upgradeAction = new ReplicatedSizeOfFilesUpgradeAction(); + } + + @Test + public void testExecuteSuccessfullyRebuildsNSSummary() { + try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext = + mockStatic(ReconGuiceServletContextListener.class)) { + mockStaticContext.when(ReconGuiceServletContextListener::getStaticInjector).thenReturn(mockInjector); + when(mockInjector.getInstance(ReconNamespaceSummaryManager.class)).thenReturn(mockNsSummaryManager); + when(mockInjector.getInstance(ReconOMMetadataManager.class)).thenReturn(mockOmMetadataManager); + + upgradeAction.execute(mockDataSource); + + // Verify that rebuildNSSummaryTree was called exactly once. 
+ verify(mockNsSummaryManager, times(1)).rebuildNSSummaryTree(mockOmMetadataManager); + } + } + + @Test + public void testExecuteThrowsRuntimeExceptionOnRebuildFailure() { + try (MockedStatic<ReconGuiceServletContextListener> mockStaticContext = + mockStatic(ReconGuiceServletContextListener.class)) { + mockStaticContext.when(ReconGuiceServletContextListener::getStaticInjector).thenReturn(mockInjector); + when(mockInjector.getInstance(ReconNamespaceSummaryManager.class)).thenReturn(mockNsSummaryManager); + when(mockInjector.getInstance(ReconOMMetadataManager.class)).thenReturn(mockOmMetadataManager); + + // Simulate a failure during the rebuild process + doThrow(new RuntimeException("Simulated rebuild error")).when(mockNsSummaryManager) + .rebuildNSSummaryTree(any(ReconOMMetadataManager.class)); + + RuntimeException thrown = assertThrows(RuntimeException.class, () -> upgradeAction.execute(mockDataSource)); + assertEquals("Failed to rebuild NSSummary during upgrade", thrown.getMessage()); + } + } + + @Test + public void testGetTypeReturnsFinalize() { + assertEquals(ReconUpgradeAction.UpgradeActionType.FINALIZE, upgradeAction.getType()); + } +} From 4e96ae0ffb8ffd470066ebac7dbf1cf647e3bf3b Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Mon, 21 Jul 2025 11:57:12 +0530 Subject: [PATCH 13/15] Revert "Remove getters and setters in parent test" This reverts commit 367ac09dc7bcdbbb8bd5954a20c81b2794a08db8. --- ...estReconInsightsForDeletedDirectories.java | 70 +++++++++++++++++-- ...tReconInsightsForDeletedDirectoriesEC.java | 26 +++---- 2 files changed, 76 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 46bfa2622074..487e3e864c8e 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -81,13 +81,69 @@ public class TestReconInsightsForDeletedDirectories { private static final Logger LOG = LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); - protected static MiniOzoneCluster cluster; - protected static FileSystem fs; - protected static String volumeName; - protected static String bucketName; - protected static ReplicationConfig replicationConfig; - protected static OzoneClient client; - protected static ReconService recon; + private static MiniOzoneCluster cluster; + private static FileSystem fs; + private static String volumeName; + private static String bucketName; + private static ReplicationConfig replicationConfig; + private static OzoneClient client; + private static ReconService recon; + + protected static MiniOzoneCluster getCluster() { + return cluster; + } + + protected static void setCluster(MiniOzoneCluster cluster) { + TestReconInsightsForDeletedDirectories.cluster = cluster; + } + + protected static FileSystem getFs() { + return fs; + } + + protected static void setFs(FileSystem fs) { + TestReconInsightsForDeletedDirectories.fs = fs; + } + + protected static String getVolumeName() { + return volumeName; + } + + protected static void setVolumeName(String volumeName) { + TestReconInsightsForDeletedDirectories.volumeName = volumeName; + } + + protected static String getBucketName() { + return bucketName; + } + + protected static void 
setBucketName(String bucketName) { + TestReconInsightsForDeletedDirectories.bucketName = bucketName; + } + + protected static ReplicationConfig getReplicationConfig() { + return replicationConfig; + } + + protected static void setReplicationConfig(ReplicationConfig replicationConfig) { + TestReconInsightsForDeletedDirectories.replicationConfig = replicationConfig; + } + + protected static OzoneClient getClient() { + return client; + } + + protected static void setClient(OzoneClient client) { + TestReconInsightsForDeletedDirectories.client = client; + } + + protected static ReconService getRecon() { + return recon; + } + + protected static void setRecon(ReconService recon) { + TestReconInsightsForDeletedDirectories.recon = recon; + } @BeforeAll public static void init() throws Exception { diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java index 544a9c7a4f71..7b95dd6874ed 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java @@ -49,25 +49,25 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); - recon = new ReconService(conf); - cluster = MiniOzoneCluster.newBuilder(conf) + setRecon(new ReconService(conf)); + setCluster(MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .addService(recon) - .build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - replicationConfig = new ECReplicationConfig("RS-3-2-1024k"); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, - new DefaultReplicationConfig(replicationConfig)); - volumeName = bucket.getVolumeName(); - bucketName = bucket.getName(); + .addService(getRecon()) + .build()); + getCluster().waitForClusterToBeReady(); + setClient(getCluster().newClient()); + setReplicationConfig(new ECReplicationConfig("RS-3-2-1024k")); + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(getClient(), BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(getReplicationConfig())); + setVolumeName(bucket.getVolumeName()); + setBucketName(bucket.getName()); String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + OzoneConsts.OZONE_URI_SCHEME, getBucketName(), getVolumeName()); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); - fs = FileSystem.get(conf); + setFs(FileSystem.get(conf)); } } From 3af9d24d7ad475a4802d6bfe6f1c9c6ce5a62238 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Tue, 22 Jul 2025 15:33:00 +0530 Subject: [PATCH 14/15] Make TestReconInsightsForDeletedDirectories parameterized --- ...estReconInsightsForDeletedDirectories.java | 135 +++++++----------- ...tReconInsightsForDeletedDirectoriesEC.java | 73 ---------- 2 files changed, 53 insertions(+), 155 deletions(-) delete mode 100644 hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java diff --git 
a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 487e3e864c8e..3b1035b83691 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -42,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; @@ -68,7 +70,9 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -83,99 +87,40 @@ public class TestReconInsightsForDeletedDirectories { private static MiniOzoneCluster cluster; private static FileSystem fs; - private static String volumeName; - private static String bucketName; - private static ReplicationConfig replicationConfig; private static OzoneClient client; private static ReconService recon; - - protected static MiniOzoneCluster getCluster() { - return cluster; - } - - protected static void setCluster(MiniOzoneCluster cluster) { - TestReconInsightsForDeletedDirectories.cluster = cluster; - } - - protected static FileSystem getFs() { - return fs; - } - - protected static void setFs(FileSystem fs) { - TestReconInsightsForDeletedDirectories.fs = fs; - } - - protected static String getVolumeName() { - return volumeName; - } - - protected static void setVolumeName(String volumeName) { - TestReconInsightsForDeletedDirectories.volumeName = volumeName; - } - - protected static String getBucketName() { - return bucketName; - } - - protected static void setBucketName(String bucketName) { - TestReconInsightsForDeletedDirectories.bucketName = bucketName; - } - - protected static ReplicationConfig getReplicationConfig() { - return replicationConfig; - } - - protected static void setReplicationConfig(ReplicationConfig replicationConfig) { - TestReconInsightsForDeletedDirectories.replicationConfig = replicationConfig; - } - - protected static OzoneClient getClient() { - return client; - } - - protected static void setClient(OzoneClient client) { - TestReconInsightsForDeletedDirectories.client = client; - } - - protected static ReconService getRecon() { - return recon; - } - - protected static void setRecon(ReconService recon) { - TestReconInsightsForDeletedDirectories.recon = recon; - } + private static OzoneConfiguration conf; @BeforeAll public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000); 
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, TimeUnit.MILLISECONDS); conf.setBoolean(OZONE_ACL_ENABLED, true); recon = new ReconService(conf); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) + .setNumDatanodes(5) .addService(recon) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); - // create a volume and a bucket to be used by OzoneFileSystem - replicationConfig = ReplicationConfig.fromTypeAndFactor(RATIS, THREE); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, - new DefaultReplicationConfig(replicationConfig)); - volumeName = bucket.getVolumeName(); - bucketName = bucket.getName(); - - String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); - - // Set the fs.defaultFS and start the filesystem - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); // Set the number of keys to be processed during batch operate. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); + } - fs = FileSystem.get(conf); + /** + * Provides a list of replication configurations (RATIS and EC) + * to be used for parameterized tests. + * + * @return List of replication configurations as Arguments. + */ + static List<Arguments> replicationConfigs() { + return Arrays.asList( + Arguments.of(ReplicationConfig.fromTypeAndFactor(RATIS, THREE)), + Arguments.of(new ECReplicationConfig("RS-3-2-1024k")) ); } @AfterAll @@ -184,7 +129,6 @@ public static void teardown() { if (cluster != null) { cluster.shutdown(); } - IOUtils.closeQuietly(fs); } @AfterEach @@ -196,6 +140,8 @@ public void cleanup() throws IOException { fs.delete(fileStatus.getPath(), true); } }); + + IOUtils.closeQuietly(fs); } /** @@ -207,9 +153,17 @@ public void cleanup() throws IOException { * ├── ... * └── file10 */ - @Test - public void testGetDeletedDirectoryInfo() + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testGetDeletedDirectoryInfo(ReplicationConfig replicationConfig) throws Exception { + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + // Create a directory structure with 10 files in dir1. Path dir1 = new Path("/dir1"); fs.mkdirs(dir1); @@ -318,9 +272,17 @@ public void testGetDeletedDirectoryInfo() * │ │ └── file3 * */ - @Test - public void testGetDeletedDirectoryInfoForNestedDirectories() + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testGetDeletedDirectoryInfoForNestedDirectories(ReplicationConfig replicationConfig) throws Exception { + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + // Create a directory structure with 10 files and 3 nested directories. Path path = new Path("/dir1/dir2/dir3"); fs.mkdirs(path); @@ -416,9 +378,18 @@ public void testGetDeletedDirectoryInfoForNestedDirectories() * ├── ... 
* └── file10 */ - @Test - public void testGetDeletedDirectoryInfoWithMultipleSubdirectories() + @ParameterizedTest + @MethodSource("replicationConfigs") + public void testGetDeletedDirectoryInfoWithMultipleSubdirectories(ReplicationConfig replicationConfig) throws Exception { + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED, + new DefaultReplicationConfig(replicationConfig)); + String rootPath = String.format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), + bucket.getVolumeName()); + // Set the fs.defaultFS and start the filesystem + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + int numSubdirectories = 10; int filesPerSubdirectory = 10; diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java deleted file mode 100644 index 7b95dd6874ed..000000000000 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectoriesEC.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; - -import java.util.concurrent.TimeUnit; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.BeforeAll; - -/** - * Test class to verify the correctness of the insights generated by Recon - * for Deleted Directories for EC ReplicationConfig. 
- */ -public class TestReconInsightsForDeletedDirectoriesEC - extends TestReconInsightsForDeletedDirectories { - - @BeforeAll - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, - TimeUnit.MILLISECONDS); - conf.setBoolean(OZONE_ACL_ENABLED, true); - setRecon(new ReconService(conf)); - setCluster(MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .addService(getRecon()) - .build()); - getCluster().waitForClusterToBeReady(); - setClient(getCluster().newClient()); - setReplicationConfig(new ECReplicationConfig("RS-3-2-1024k")); - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(getClient(), BucketLayout.FILE_SYSTEM_OPTIMIZED, - new DefaultReplicationConfig(getReplicationConfig())); - setVolumeName(bucket.getVolumeName()); - setBucketName(bucket.getName()); - - String rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, getBucketName(), getVolumeName()); - - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); - - setFs(FileSystem.get(conf)); - } -} From 138959f3d763ccd81d3ae1837eec8d96c9ad7702 Mon Sep 17 00:00:00 2001 From: Penumudy Tanvi Date: Tue, 22 Jul 2025 15:45:34 +0530 Subject: [PATCH 15/15] Fix findbugs --- .../ozone/recon/TestReconInsightsForDeletedDirectories.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 3b1035b83691..09c80590e138 100644 --- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -86,7 +86,7 @@ public class TestReconInsightsForDeletedDirectories { LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); private static MiniOzoneCluster cluster; - private static FileSystem fs; + private FileSystem fs; private static OzoneClient client; private static ReconService recon; private static OzoneConfiguration conf;