From 49ef9e8db7437231050a40f61ec6a719ea0ceba4 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 23 Oct 2023 15:23:27 +0530 Subject: [PATCH 01/31] Rebased the PR --- .../recon/api/handlers/EntityHandler.java | 64 ++-- .../recon/api/handlers/OBSBucketHandler.java | 318 ++++++++++++++++++ .../ozone/recon/tasks/NSSummaryTask.java | 1 - .../recon/tasks/NSSummaryTaskWithOBS.java | 156 +++++++++ 4 files changed, 517 insertions(+), 22 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 45a7290ddad9..94799fd041fe 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -32,9 +32,12 @@ import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.ArrayList; import java.util.Set; @@ -46,6 +49,10 @@ */ public abstract class EntityHandler { + + private static final Logger LOG = LoggerFactory.getLogger( + EntityHandler.class); + private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconOMMetadataManager omMetadataManager; @@ -117,69 +124,84 @@ public String[] getNames() { * @param path the original path request used to identify root level * @return the entity handler of client's request */ + public static EntityHandler getEntityHandler( - ReconNamespaceSummaryManager reconNamespaceSummaryManager, - ReconOMMetadataManager omMetadataManager, - OzoneStorageContainerManager reconSCM, - String path) throws IOException { + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager, + OzoneStorageContainerManager reconSCM, + String path) throws IOException { BucketHandler bucketHandler; String normalizedPath = normalizePath(path); + LOG.debug("normalizedPath: {}", normalizedPath); + String[] names = parseRequestPath(normalizedPath); + LOG.debug("names: {}", Arrays.toString(names)); + if (path.equals(OM_KEY_PREFIX)) { + LOG.debug("path is OM_KEY_PREFIX"); return EntityType.ROOT.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } if (names.length == 0) { + LOG.debug("names length is 0"); return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); - } else if (names.length == 1) { // volume level check + omMetadataManager, reconSCM, null, path); + } else if (names.length == 1) { + LOG.debug("names length is 1"); String volName = names[0]; if (!volumeExists(omMetadataManager, volName)) { + LOG.debug("volume {} doesn't exist", volName); return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } + LOG.debug("volume {} exists", volName); return EntityType.VOLUME.create(reconNamespaceSummaryManager, 
- omMetadataManager, reconSCM, null, path); - } else if (names.length == 2) { // bucket level check + omMetadataManager, reconSCM, null, path); + } else if (names.length == 2) { + LOG.debug("names length is 2"); String volName = names[0]; String bucketName = names[1]; bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, + omMetadataManager, reconSCM, + volName, bucketName); if (bucketHandler == null || !bucketHandler.bucketExists(volName, bucketName)) { + LOG.debug("bucket {} doesn't exist", bucketName); return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } + LOG.debug("bucket {} exists", bucketName); return EntityType.BUCKET.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); - } else { // length > 3. check dir or key existence + omMetadataManager, reconSCM, bucketHandler, path); + } else { + LOG.debug("names length is greater than 2"); String volName = names[0]; String bucketName = names[1]; String keyName = BucketHandler.getKeyName(names); bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, + omMetadataManager, reconSCM, + volName, bucketName); // check if either volume or bucket doesn't exist if (bucketHandler == null || !volumeExists(omMetadataManager, volName) || !bucketHandler.bucketExists(volName, bucketName)) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } return bucketHandler.determineKeyPath(keyName) .create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); + omMetadataManager, reconSCM, bucketHandler, path); } + } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java new file mode 100644 index 000000000000..ca8e2cd80d4f --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -0,0 +1,318 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.recon.api.handlers; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.removeTrailingSlashIfNeeded; + +/** + * Class for handling OBS buckets. + */ +public class OBSBucketHandler extends BucketHandler{ + + private static final Logger LOG = LoggerFactory.getLogger( + OBSBucketHandler.class); + + private final String vol; + private final String bucket; + private final OmBucketInfo omBucketInfo; + + public OBSBucketHandler( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager, + OzoneStorageContainerManager reconSCM, + OmBucketInfo bucketInfo) { + super(reconNamespaceSummaryManager, omMetadataManager, + reconSCM); + this.omBucketInfo = bucketInfo; + this.vol = omBucketInfo.getVolumeName(); + this.bucket = omBucketInfo.getBucketName(); + } + + /** + * Helper function to check if a path is a key, or invalid. + * + * @param keyName key name + * @return KEY, or UNKNOWN + * @throws IOException + */ + public EntityType determineKeyPath(String keyName) + throws IOException { + + String filename = OzoneFSUtils.removeTrailingSlashIfNeeded(keyName); + + String key = OM_KEY_PREFIX + vol + + OM_KEY_PREFIX + bucket + + OM_KEY_PREFIX + filename; + + Table keyTable = getKeyTable(); + + TableIterator> + iterator = keyTable.iterator(); + + iterator.seek(key); + if (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + if (dbKey.equals(key)) { + return EntityType.KEY; + } + } + return EntityType.UNKNOWN; + } + + /** + * KeyTable's key is in the format of "vol/bucket/keyName". + * Make use of RocksDB's order to seek to the prefix and avoid full iteration. + * Calculating DU only for keys. Skipping any directories and + * handling only direct keys.
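+ * For example, for bucket "bucket1" in volume "vol1" the seek prefix is
+ * "/vol1/bucket1/", and iteration stops at the first key that no longer
+ * starts with that prefix.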
+ * + * @param parentId + * @return total DU of direct keys under object + * @throws IOException + */ + public long calculateDUUnderObject(long parentId) + throws IOException { + Table keyTable = getKeyTable(); + + long totalDU = 0L; + TableIterator> + iterator = keyTable.iterator(); + + String seekPrefix = OM_KEY_PREFIX + + vol + + OM_KEY_PREFIX + + bucket + + OM_KEY_PREFIX; + + NSSummary nsSummary = getReconNamespaceSummaryManager() + .getNSSummary(parentId); + // empty bucket + if (nsSummary == null) { + return 0; + } + + if (omBucketInfo.getObjectID() != parentId) { + String dirName = nsSummary.getDirName(); + seekPrefix += dirName; + } + + String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); + iterator.seek(seekPrefix); + // handle direct keys + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + // since the RocksDB is ordered, seek until the prefix isn't matched + if (!dbKey.startsWith(seekPrefix)) { + break; + } + + String[] keys = dbKey.split(OM_KEY_PREFIX); + + // iteration moved to the next level + // and not handling direct keys + if (keys.length - seekKeys.length > 1) { + continue; + } + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + // skip directory markers, just include directKeys + if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + continue; + } + totalDU += keyInfo.getReplicatedSize(); + } + } + + + return totalDU; + } + + /** + * This method handles disk usage of direct keys. + * + * @param parentId parent bucket + * @param withReplica if withReplica is enabled, set sizeWithReplica + * for each direct key's DU + * @param listFile if listFile is enabled, append key DU as a subpath + * @param duData the current DU data + * @param normalizedPath the normalized path request + * @return the total DU of all direct keys + * @throws IOException IOE + */ + public long handleDirectKeys(long parentId, boolean withReplica, + boolean listFile, + List duData, + String normalizedPath) throws IOException { + + Table keyTable = getKeyTable(); + long keyDataSizeWithReplica = 0L; + + TableIterator> + iterator = keyTable.iterator(); + + String seekPrefix = OM_KEY_PREFIX + + vol + + OM_KEY_PREFIX + + bucket + + OM_KEY_PREFIX; + + NSSummary nsSummary = getReconNamespaceSummaryManager() + .getNSSummary(parentId); + // empty bucket + if (nsSummary == null) { + return 0; + } + + String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); + iterator.seek(seekPrefix); + + while (iterator.hasNext()) { + // KeyName : OmKeyInfo-Object + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + + if (!dbKey.startsWith(seekPrefix)) { + break; + } + + String[] keys = dbKey.split(OM_KEY_PREFIX); + + // iteration moved to the next level + // and not handling direct keys + if (keys.length - seekKeys.length > 1) { + continue; + } + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + // skip directories by checking if they end with '/' + // just include directKeys + if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + continue; + } + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String subpath = buildSubpath(normalizedPath, + keyInfo.getFileName()); + diskUsage.setSubpath(subpath); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // list the key as a subpath + if (listFile) { + duData.add(diskUsage); + } + } + } + + return 
keyDataSizeWithReplica; + } + + /** + * Given a valid path request for a directory, + * return the directory object ID. + * + * @param names parsed path request in a list of names + * @return directory object ID + */ + public long getDirObjectId(String[] names) throws IOException { + return getDirObjectId(names, names.length); + } + + /** + * Given a valid path request and a cutoff length where should be iterated + * up to. + * return the directory object ID for the object at the cutoff length + * + * @param names parsed path request in a list of names + * @param cutoff cannot be larger than the names' length. If equals, + * return the directory object id for the whole path + * @return directory object ID + */ + public long getDirObjectId(String[] names, int cutoff) throws IOException { + long dirObjectId = getBucketObjectId(names); + StringBuilder bld = new StringBuilder(); + for (int i = 0; i < cutoff; ++i) { + bld.append(OM_KEY_PREFIX) + .append(names[i]); + } + bld.append(OM_KEY_PREFIX); + String dirKey = bld.toString(); + OmKeyInfo dirInfo = getKeyTable().getSkipCache(dirKey); + + if (dirInfo != null) { + dirObjectId = dirInfo.getObjectID(); + } else { + throw new IOException("OmKeyInfo for the directory is null"); + } + + return dirObjectId; + } + + + public BucketLayout getBucketLayout() { + return BucketLayout.LEGACY; + } + + + public OmKeyInfo getKeyInfo(String[] names) throws IOException { + String ozoneKey = OM_KEY_PREFIX; + ozoneKey += String.join(OM_KEY_PREFIX, names); + + OmKeyInfo keyInfo = getKeyTable().getSkipCache(ozoneKey); + return keyInfo; + } + + // In OBS buckets we don't have the concept of Directories + @Override + public OmDirectoryInfo getDirInfo(String[] names) throws IOException { + return null; + } + + public Table getKeyTable() { + Table keyTable = + getOmMetadataManager().getKeyTable(getBucketLayout()); + return keyTable; + } + +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 61cc8ad10c25..c995ad0ea2c6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.recon.tasks; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..71a9bb28f6a7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Class for handling OBS specific tasks. + */ +public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler { + + private static final BucketLayout BUCKET_LAYOUT = BucketLayout.OBJECT_STORE; + + private static final Logger LOG = + LoggerFactory.getLogger(NSSummaryTaskWithOBS.class); + + private boolean enableFileSystemPaths; + + public NSSummaryTaskWithOBS(ReconNamespaceSummaryManager + reconNamespaceSummaryManager, + ReconOMMetadataManager + reconOMMetadataManager, + OzoneConfiguration + ozoneConfiguration) { + super(reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + // true if FileSystemPaths enabled + enableFileSystemPaths = ozoneConfiguration + .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } + + + public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { + Map nsSummaryMap = new HashMap<>(); + + try { + Table keyTable = + omMetadataManager.getKeyTable(BUCKET_LAYOUT); + + try (TableIterator> + keyTableIter = keyTable.iterator()) { + + while (keyTableIter.hasNext()) { + Table.KeyValue kv = keyTableIter.next(); + OmKeyInfo keyInfo = kv.getValue(); + + // KeyTable entries belong to both Legacy and OBS buckets. + // Check bucket layout and if it's Legacy-FS + // continue to the next iteration. + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = omMetadataManager + .getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = omMetadataManager + .getBucketTable().getSkipCache(bucketDBKey); + + if (!omBucketInfo.getBucketLayout() + .isObjectStore(enableFileSystemPaths)) { + continue; + } + + setKeyParentID(keyInfo); + + handlePutKeyEvent(keyInfo, nsSummaryMap); + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + } + } catch (IOException ioEx) { + LOG.error("Unable to reprocess Namespace Summary data in Recon DB. ", + ioEx); + return false; + } + + // flush and commit left out entries at end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + LOG.info("Completed a reprocess run of NSSummaryTaskWithOBS"); + return true; + } + + /** + * KeyTable entries don't have the parentId set. 
+ * In order to reuse the existing FSO methods that rely on + * the parentId, we have to set it explicitly. + * @param keyInfo + * @throws IOException + */ + private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { + System.out.println("#### INSIDE NSSummaryTaskWithOBS #### "); + // keyPath: [key1-legacy] + // OM_KEY_PREFIX: / + System.out.println("keyName: " + keyInfo.getKeyName()); + System.out.println("OM_KEY_PREFIX: " + OM_KEY_PREFIX); + + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + + // bucketKey: /s3v/legacy-bucket + System.out.println("bucketKey: " + bucketKey); + + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + System.out.println("parentBucketInfo: " + parentBucketInfo); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithOBS is null"); + } + System.out.println("#### GOING OUTSIDE NSSummaryTaskWithOBS #### "); + + } + +} \ No newline at end of file From a47e09f6a688ab507cd0b1f2f3a0acc6ef2d70c7 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 23 Oct 2023 15:26:35 +0530 Subject: [PATCH 02/31] Rebased the PR --- .../recon/api/handlers/BucketHandler.java | 4 +- .../recon/api/handlers/EntityHandler.java | 22 ++------- .../recon/api/handlers/OBSBucketHandler.java | 47 +++++-------------- .../ozone/recon/tasks/NSSummaryTask.java | 13 +++++ .../recon/tasks/NSSummaryTaskWithOBS.java | 40 ++++++++-------- 5 files changed, 49 insertions(+), 77 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 09cbf4fe4e40..34dcba40f81b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -178,9 +178,7 @@ public static BucketHandler getBucketHandler( omMetadataManager, reconSCM, bucketInfo); } else if (bucketInfo.getBucketLayout() .equals(BucketLayout.OBJECT_STORE)) { - // TODO: HDDS-7810 Write a handler for object store bucket - // We can use LegacyBucketHandler for OBS bucket for now. 
- return new LegacyBucketHandler(reconNamespaceSummaryManager, + return new OBSBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); } else { LOG.error("Unsupported bucket layout: " + diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 94799fd041fe..94d6f646f101 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -37,7 +37,6 @@ import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.ArrayList; import java.util.Set; @@ -124,7 +123,6 @@ public String[] getNames() { * @param path the original path request used to identify root level * @return the entity handler of client's request */ - public static EntityHandler getEntityHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, @@ -133,34 +131,24 @@ public static EntityHandler getEntityHandler( BucketHandler bucketHandler; String normalizedPath = normalizePath(path); - LOG.debug("normalizedPath: {}", normalizedPath); - String[] names = parseRequestPath(normalizedPath); - LOG.debug("names: {}", Arrays.toString(names)); - if (path.equals(OM_KEY_PREFIX)) { - LOG.debug("path is OM_KEY_PREFIX"); return EntityType.ROOT.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); } if (names.length == 0) { - LOG.debug("names length is 0"); return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); - } else if (names.length == 1) { - LOG.debug("names length is 1"); + } else if (names.length == 1) { // volume level check String volName = names[0]; if (!volumeExists(omMetadataManager, volName)) { - LOG.debug("volume {} doesn't exist", volName); return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); } - LOG.debug("volume {} exists", volName); return EntityType.VOLUME.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); - } else if (names.length == 2) { - LOG.debug("names length is 2"); + } else if (names.length == 2) { // bucket level check String volName = names[0]; String bucketName = names[1]; @@ -171,15 +159,12 @@ public static EntityHandler getEntityHandler( if (bucketHandler == null || !bucketHandler.bucketExists(volName, bucketName)) { - LOG.debug("bucket {} doesn't exist", bucketName); return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, null, path); } - LOG.debug("bucket {} exists", bucketName); return EntityType.BUCKET.create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketHandler, path); - } else { - LOG.debug("names length is greater than 2"); + } else { // length > 3. 
check dir or key existence String volName = names[0]; String bucketName = names[1]; String keyName = BucketHandler.getKeyName(names); @@ -201,7 +186,6 @@ public static EntityHandler getEntityHandler( .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketHandler, path); } - } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index ca8e2cd80d4f..3b1ba49801a8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.ContainerManager; + import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.EntityType; import org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -33,15 +35,13 @@ import java.io.IOException; import java.util.List; -import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.helpers.OzoneFSUtils.removeTrailingSlashIfNeeded; /** * Class for handling OBS buckets. */ -public class OBSBucketHandler extends BucketHandler{ +public class OBSBucketHandler extends BucketHandler { private static final Logger LOG = LoggerFactory.getLogger( OBSBucketHandler.class); @@ -71,12 +71,9 @@ public OBSBucketHandler( */ public EntityType determineKeyPath(String keyName) throws IOException { - - String filename = OzoneFSUtils.removeTrailingSlashIfNeeded(keyName); - String key = OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + - OM_KEY_PREFIX + filename; + OM_KEY_PREFIX + keyName; Table keyTable = getKeyTable(); @@ -159,7 +156,6 @@ public long calculateDUUnderObject(long parentId) } } - return totalDU; } @@ -249,20 +245,17 @@ public long handleDirectKeys(long parentId, boolean withReplica, } /** - * Given a valid path request for a directory, - * return the directory object ID. + * Object stores do not support directories, hence return null. * * @param names parsed path request in a list of names * @return directory object ID */ public long getDirObjectId(String[] names) throws IOException { - return getDirObjectId(names, names.length); + return Long.parseLong(null); } /** - * Given a valid path request and a cutoff length where should be iterated - * up to. - * return the directory object ID for the object at the cutoff length + * Object stores do not support directories, hence return null. * * @param names parsed path request in a list of names * @param cutoff cannot be larger than the names' length.
If equals, + * return the directory object id for the whole path + * @return directory object ID + */ public long getDirObjectId(String[] names, int cutoff) throws IOException { - long dirObjectId = getBucketObjectId(names); - StringBuilder bld = new StringBuilder(); - for (int i = 0; i < cutoff; ++i) { - bld.append(OM_KEY_PREFIX) - .append(names[i]); - } - bld.append(OM_KEY_PREFIX); - String dirKey = bld.toString(); - OmKeyInfo dirInfo = getKeyTable().getSkipCache(dirKey); - - if (dirInfo != null) { - dirObjectId = dirInfo.getObjectID(); - } else { - throw new IOException("OmKeyInfo for the directory is null"); - } - - return dirObjectId; + return Long.parseLong(null); } public BucketLayout getBucketLayout() { - return BucketLayout.LEGACY; + return BucketLayout.OBJECT_STORE; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index c995ad0ea2c6..3e445c8e9793 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon.tasks; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -66,6 +67,7 @@ public class NSSummaryTask implements ReconOmTask { private final ReconOMMetadataManager reconOMMetadataManager; private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO; private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy; + private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS; private final OzoneConfiguration ozoneConfiguration; @Inject @@ -84,6 +86,9 @@ public NSSummaryTask(ReconNamespaceSummaryManager this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); + this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); } @Override @@ -100,6 +105,12 @@ public Pair process(OMUpdateEventBatch events) { } else { LOG.error("processWithFSO failed."); } + if (success) { + success = nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + } else { + LOG.error("processWithLegacy failed."); + } + return new ImmutablePair<>(getTaskName(), success); } @@ -120,6 +131,8 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { .reprocessWithFSO(omMetadataManager)); tasks.add(() -> nsSummaryTaskWithLegacy .reprocessWithLegacy(reconOMMetadataManager)); + tasks.add(() -> nsSummaryTaskWithOBS + .reprocessWithOBS(reconOMMetadataManager)); List> results; ExecutorService executorService = Executors diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java index 71a9bb28f6a7..d18ea0212787 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -23,7 +23,9 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; -import
org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -31,13 +33,10 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; /** * Class for handling OBS specific tasks. @@ -52,11 +51,11 @@ public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler { private boolean enableFileSystemPaths; public NSSummaryTaskWithOBS(ReconNamespaceSummaryManager - reconNamespaceSummaryManager, + reconNamespaceSummaryManager, ReconOMMetadataManager - reconOMMetadataManager, + reconOMMetadataManager, OzoneConfiguration - ozoneConfiguration) { + ozoneConfiguration) { super(reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); // true if FileSystemPaths enabled @@ -74,7 +73,7 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { omMetadataManager.getKeyTable(BUCKET_LAYOUT); try (TableIterator> - keyTableIter = keyTable.iterator()) { + keyTableIter = keyTable.iterator()) { while (keyTableIter.hasNext()) { Table.KeyValue kv = keyTableIter.next(); @@ -122,6 +121,7 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { * KeyTable entries don't have the parentId set. * In order to reuse the existing FSO methods that rely on * the parentId, we have to set it explicitly. 
+ * * @param keyInfo * @throws IOException */ @@ -132,22 +132,22 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { System.out.println("keyName: " + keyInfo.getKeyName()); System.out.println("OM_KEY_PREFIX: " + OM_KEY_PREFIX); - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - // bucketKey: /s3v/legacy-bucket - System.out.println("bucketKey: " + bucketKey); + // bucketKey: /s3v/legacy-bucket + System.out.println("bucketKey: " + bucketKey); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); - System.out.println("parentBucketInfo: " + parentBucketInfo); + System.out.println("parentBucketInfo: " + parentBucketInfo); - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithOBS is null"); + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithOBS is null"); } System.out.println("#### GOING OUTSIDE NSSummaryTaskWithOBS #### "); From 0c2005f0e594031de56ae7eee624e5394cd9628a Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 25 Oct 2023 13:56:41 +0530 Subject: [PATCH 03/31] New changes --- .../ozone/recon/tasks/NSSummaryTask.java | 2 +- .../tasks/NSSummaryTaskDbEventHandler.java | 27 ++++++ .../recon/tasks/NSSummaryTaskWithLegacy.java | 88 ++++++++++++------- .../tasks/TestNSSummaryTaskWithLegacy.java | 2 +- 4 files changed, 86 insertions(+), 33 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 3e445c8e9793..d4b057585e0f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -101,7 +101,7 @@ public Pair process(OMUpdateEventBatch events) { boolean success; success = nsSummaryTaskWithFSO.processWithFSO(events); if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); + success = nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); } else { LOG.error("processWithFSO failed."); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index f00d83e64a52..6dffc33723a6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -113,6 +113,33 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map nsSummaryMap) throws IOException { + long parentObjectId = keyInfo.getParentObjectID(); + // Try to get the NSSummary from our local map that maps NSSummaries to IDs + NSSummary nsSummary = nsSummaryMap.get(parentObjectId); + if (nsSummary == null) { + // If we don't have it in this batch we try to get it from the DB + nsSummary = 
reconNamespaceSummaryManager.getNSSummary(parentObjectId); + } + if (nsSummary == null) { + // If we don't have it locally and in the DB we create a new instance + // as this is a new ID + nsSummary = new NSSummary(); + } + int numOfFile = nsSummary.getNumOfFiles(); + long sizeOfFile = nsSummary.getSizeOfFiles(); + int[] fileBucket = nsSummary.getFileSizeBucket(); + nsSummary.setNumOfFiles(numOfFile + 1); + long dataSize = keyInfo.getDataSize(); + nsSummary.setSizeOfFiles(sizeOfFile + dataSize); + int binIndex = ReconUtils.getFileSizeBinIndex(dataSize); + ++fileBucket[binIndex]; + nsSummary.setFileSizeBucket(fileBucket); + nsSummary.addChildDir(keyInfo.getObjectID()); + nsSummaryMap.put(parentObjectId, nsSummary); + } + protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, Map nsSummaryMap) throws IOException { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index ec1ccd0542fc..c011d42d0711 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -233,6 +233,13 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { if (omBucketInfo.getBucketLayout() .isObjectStore(enableFileSystemPaths)) { + // For OBS buckets and Legacy buckets with enable file system paths + // set to TRUE, the only parent they can have is the bucket itself. + setParentObjectIDForBucket(keyInfo); + handlePutKeyEventForObjectStoreLayout(keyInfo, nsSummaryMap); + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } continue; } @@ -278,39 +285,58 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { String[] keyPath = keyInfo.getKeyName().split(OM_KEY_PREFIX); - // If the path contains only one key then keyPath.length - // will be 1 and the parent will be a bucket. - // If the keyPath.length is greater than 1 then - // there is at least one directory. if (keyPath.length > 1) { - String[] dirs = Arrays.copyOf(keyPath, keyPath.length - 1); - String parentKeyName = String.join(OM_KEY_PREFIX, dirs); - parentKeyName += OM_KEY_PREFIX; - String fullParentKeyName = - getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), - keyInfo.getBucketName(), parentKeyName); - OmKeyInfo parentKeyInfo = getReconOMMetadataManager() - .getKeyTable(BUCKET_LAYOUT) - .getSkipCache(fullParentKeyName); - - if (parentKeyInfo != null) { - keyInfo.setParentObjectID(parentKeyInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } + setParentObjectIDForDirectory(keyInfo, keyPath); } else { - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); - - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } + setParentObjectIDForBucket(keyInfo); + } + } + + /** + * Set the parent object ID for a directory. 
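+ * The parent key is derived by joining every path element except the
+ * last and appending a trailing slash, then looking that key up in the
+ * KeyTable.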
+ * @param keyInfo + * @param keyPath + * @throws IOException + */ + private void setParentObjectIDForDirectory(OmKeyInfo keyInfo, + String[] keyPath) + throws IOException { + String[] dirs = Arrays.copyOf(keyPath, keyPath.length - 1); + String parentKeyName = String.join(OM_KEY_PREFIX, dirs); + parentKeyName += OM_KEY_PREFIX; + String fullParentKeyName = + getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), parentKeyName); + OmKeyInfo parentKeyInfo = getReconOMMetadataManager() + .getKeyTable(BUCKET_LAYOUT) + .getSkipCache(fullParentKeyName); + + if (parentKeyInfo != null) { + keyInfo.setParentObjectID(parentKeyInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); + } + } + + /** + * Set the parent object ID for a bucket. + * @param keyInfo + * @throws IOException + */ + private void setParentObjectIDForBucket(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); } } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java index 1e32db78da34..763b4b184702 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java @@ -739,6 +739,6 @@ private static void initializeNewOmMetadataManager( } private static BucketLayout getBucketLayout() { - return BucketLayout.LEGACY; + return BucketLayout.OBJECT_STORE; } } From 6031ad5baa81b6958fe9a8e63dedbd9e65ec5797 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 29 Oct 2023 18:39:26 +0530 Subject: [PATCH 04/31] Fixed NSSummaryTaskWithOBS reprocess method --- .../ozone/recon/api/OMDBInsightEndpoint.java | 154 +++++++++++++++++- .../recon/api/handlers/OBSBucketHandler.java | 130 ++++----------- .../recon/tasks/NSSummaryTaskWithLegacy.java | 88 ++++------ .../recon/tasks/NSSummaryTaskWithOBS.java | 3 +- 4 files changed, 211 insertions(+), 164 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 37d6ae42b5c7..ec93a422ee97 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -24,8 +24,10 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import 
org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -48,11 +50,11 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; @@ -159,6 +161,149 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, * } */ + public void omTableIterator() { + Map directoryMap = readDirectoryTable(); + Map fileMap = readFileTable(); + Map keyMap = readKeyTable(); + Map containerMap = readContainerTable(); + Map nSSummaryMap = readNSSummaryTable(); + + String outputPath = "output/"; + String directoryPath = outputPath + "directoryTable.txt"; + String filePath = outputPath + "fileTable.txt"; + String keyPath = outputPath + "keyTable.txt"; + String containerPath = outputPath + "containerTable.txt"; + String nSSummaryPath = outputPath + "nSSummaryTable.txt"; + + createOutputFolder(outputPath); + + writeTableToFile(directoryPath, directoryMap, (key, value) -> + key + " | " + value.getObjectID() + " | " + value.getParentObjectID()); + + writeTableToFile(filePath, fileMap, (key, value) -> key); + + writeTableToFile(keyPath, keyMap, (key, value) -> key + " | " + value.getKeyName()); + + writeTableToFile(containerPath, containerMap, (key, value) -> + key.getKeyPrefix() + " | " + key.getContainerId()); + +// writeTableToFile(nSSummaryPath, nSSummaryMap, (key, value) -> key + " | " + value.getDirName()); + } + + private void createOutputFolder(String outputPath) { + File outputFolder = new File(outputPath); + if (!outputFolder.exists()) { + boolean success = outputFolder.mkdirs(); + if (!success) { + System.err.println("Failed to create output folder."); + return; + } + } + } + + + private void writeTableToFile(String filePath, Map table, TableEntryFormatter formatter) { + File file = new File(filePath); + try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) { + for (Map.Entry entry : table.entrySet()) { + String key = entry.getKey().toString(); + String value = formatter.format(entry.getKey(), entry.getValue()); + writer.write(key + " | " + value + "\n"); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + + private interface TableEntryFormatter { + String format(K key, V value); + } + + private Map readNSSummaryTable() { + Map nSSummaryMap = new LinkedHashMap<>(); + try (TableIterator> iterator = + reconNamespaceSummaryManager.getNSSummaryTable().iterator()) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String key = kv.getKey().toString(); + NSSummary value = kv.getValue(); + nSSummaryMap.put(key, value); + System.out.println("key: " + key + " childDir: " + value.getChildDir() + " " + value.getDirName()); + } + } catch (IOException e) { + e.printStackTrace(); + } + return nSSummaryMap; + } + + private Map readDirectoryTable() { + Map directoryMap = new LinkedHashMap<>(); + try (TableIterator> iterator = + omMetadataManager.getDirectoryTable().iterator()) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String key = kv.getKey(); + OmDirectoryInfo value = kv.getValue(); + directoryMap.put(key, value); + System.out.println("key: " + key + " value: " + value.getName() + " objectId : " 
+ value.getObjectID()); + } + } catch (IOException e) { + e.printStackTrace(); + } + return directoryMap; + } + + private Map readFileTable() { + Map fileMap = new LinkedHashMap<>(); + try (TableIterator> iterator = + omMetadataManager.getFileTable().iterator()) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String key = kv.getKey(); + OmKeyInfo value = kv.getValue(); + fileMap.put(key, value); + System.out.println("key: " + key + " value: " + value.getKeyName()); + } + } catch (IOException e) { + e.printStackTrace(); + } + return fileMap; + } + + private Map readKeyTable() { + Map keyMap = new LinkedHashMap<>(); + try (TableIterator> iterator = + omMetadataManager.getKeyTable(BucketLayout.LEGACY).iterator()) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String key = kv.getKey(); + OmKeyInfo value = kv.getValue(); + keyMap.put(key, value); + System.out.println("key: " + key + " value: " + value.getKeyName()); + } + } catch (IOException e) { + e.printStackTrace(); + } + return keyMap; + } + + private Map readContainerTable() { + Map containerMap = new LinkedHashMap<>(); + try (TableIterator> containerIterator = + reconContainerMetadataManager.getContainerTableIterator()) { + while (containerIterator.hasNext()) { + Table.KeyValue kv = containerIterator.next(); + ContainerKeyPrefix key = kv.getKey(); + Integer value = kv.getValue(); + containerMap.put(key, value); + System.out.println("key: " + key + " value: " + value); + } + } catch (IOException e) { + e.printStackTrace(); + } + return containerMap; + } + @GET @Path("/open") public Response getOpenKeyInfo( @@ -172,6 +317,7 @@ public Response getOpenKeyInfo( @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) boolean includeNonFso) { + omTableIterator(); KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse(); List nonFSOKeyInfoList = openKeyInsightInfo.getNonFSOKeyInfoList(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index 3b1ba49801a8..a6553da11a7b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -91,81 +91,14 @@ public EntityType determineKeyPath(String keyName) return EntityType.UNKNOWN; } - /** - * KeyTable's key is in the format of "vol/bucket/keyName". - * Make use of RocksDB's order to seek to the prefix and avoid full iteration. - * Calculating DU only for keys. Skipping any directories and - * handling only direct keys. 
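- * For example, for bucket "bucket1" in volume "vol1" the seek prefix is
- * "/vol1/bucket1/", and iteration stops at the first key that no longer
- * starts with that prefix.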
- * - * @param parentId - * @return total DU of direct keys under object - * @throws IOException - */ - public long calculateDUUnderObject(long parentId) - throws IOException { - Table keyTable = getKeyTable(); - - long totalDU = 0L; - TableIterator> - iterator = keyTable.iterator(); - - String seekPrefix = OM_KEY_PREFIX + - vol + - OM_KEY_PREFIX + - bucket + - OM_KEY_PREFIX; - - NSSummary nsSummary = getReconNamespaceSummaryManager() - .getNSSummary(parentId); - // empty bucket - if (nsSummary == null) { - return 0; - } - - if (omBucketInfo.getObjectID() != parentId) { - String dirName = nsSummary.getDirName(); - seekPrefix += dirName; - } - - String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); - iterator.seek(seekPrefix); - // handle direct keys - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String dbKey = kv.getKey(); - // since the RocksDB is ordered, seek until the prefix isn't matched - if (!dbKey.startsWith(seekPrefix)) { - break; - } - - String[] keys = dbKey.split(OM_KEY_PREFIX); - - // iteration moved to the next level - // and not handling direct keys - if (keys.length - seekKeys.length > 1) { - continue; - } - - OmKeyInfo keyInfo = kv.getValue(); - if (keyInfo != null) { - // skip directory markers, just include directKeys - if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - continue; - } - totalDU += keyInfo.getReplicatedSize(); - } - } - - return totalDU; - } - /** * This method handles disk usage of direct keys. * - * @param parentId parent bucket + * @param parentId parent OBS bucket * @param withReplica if withReplica is enabled, set sizeWithReplica * for each direct key's DU - * @param listFile if listFile is enabled, append key DU as a subpath + * @param listFile if listFile is enabled, append key DU as a children + * keys * @param duData the current DU data * @param normalizedPath the normalized path request * @return the total DU of all direct keys @@ -190,12 +123,11 @@ public long handleDirectKeys(long parentId, boolean withReplica, NSSummary nsSummary = getReconNamespaceSummaryManager() .getNSSummary(parentId); - // empty bucket + // Handle the case of an empty bucket. if (nsSummary == null) { return 0; } - String[] seekKeys = seekPrefix.split(OM_KEY_PREFIX); iterator.seek(seekPrefix); while (iterator.hasNext()) { @@ -203,29 +135,16 @@ public long handleDirectKeys(long parentId, boolean withReplica, Table.KeyValue kv = iterator.next(); String dbKey = kv.getKey(); + // Exit loop if the key doesn't match the seekPrefix. if (!dbKey.startsWith(seekPrefix)) { break; } - String[] keys = dbKey.split(OM_KEY_PREFIX); - - // iteration moved to the next level - // and not handling direct keys - if (keys.length - seekKeys.length > 1) { - continue; - } - OmKeyInfo keyInfo = kv.getValue(); if (keyInfo != null) { - // skip directories by checking if they end with '/' - // just include directKeys - if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - continue; - } DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); - String subpath = buildSubpath(normalizedPath, - keyInfo.getFileName()); - diskUsage.setSubpath(subpath); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); diskUsage.setKey(true); diskUsage.setSize(keyInfo.getDataSize()); @@ -234,7 +153,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, keyDataSizeWithReplica += keyDU; diskUsage.setSizeWithReplica(keyDU); } - // list the key as a subpath + // List all the keys for the OBS bucket if requested. 
if (listFile) { duData.add(diskUsage); } @@ -247,28 +166,29 @@ public long handleDirectKeys(long parentId, boolean withReplica, /** * Object stores do not support directories, hence return null. * - * @param names parsed path request in a list of names - * @return directory object ID + * @return null */ - public long getDirObjectId(String[] names) throws IOException { + public long calculateDUUnderObject(long parentId) + throws IOException { return Long.parseLong(null); } /** * Object stores do not support directories, hence return null. * - * @param names parsed path request in a list of names - * @param cutoff cannot be larger than the names' length. If equals, - * return the directory object id for the whole path - * @return directory object ID + * @return null */ - public long getDirObjectId(String[] names, int cutoff) throws IOException { + public long getDirObjectId(String[] names) throws IOException { return Long.parseLong(null); } - - public BucketLayout getBucketLayout() { - return BucketLayout.OBJECT_STORE; + /** + * Object stores do not support directories, hence return null. + * + * @return null + */ + public long getDirObjectId(String[] names, int cutoff) throws IOException { + return Long.parseLong(null); } @@ -280,7 +200,11 @@ public OmKeyInfo getKeyInfo(String[] names) throws IOException { return keyInfo; } - // In OBS buckets we don't have the concept of Directories + /** + * Object stores do not support directories, hence return null. + * + * @return null + */ @Override public OmDirectoryInfo getDirInfo(String[] names) throws IOException { return null; @@ -292,4 +216,8 @@ public Table getKeyTable() { return keyTable; } + public BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index c011d42d0711..ec1ccd0542fc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -233,13 +233,6 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { if (omBucketInfo.getBucketLayout() .isObjectStore(enableFileSystemPaths)) { - // For OBS buckets and Legacy buckets with enable file system paths - // set to TRUE, the only parent they can have is the bucket itself. - setParentObjectIDForBucket(keyInfo); - handlePutKeyEventForObjectStoreLayout(keyInfo, nsSummaryMap); - if (!checkAndCallFlushToDB(nsSummaryMap)) { - return false; - } continue; } @@ -285,58 +278,39 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { String[] keyPath = keyInfo.getKeyName().split(OM_KEY_PREFIX); + // If the path contains only one key then keyPath.length + // will be 1 and the parent will be a bucket. + // If the keyPath.length is greater than 1 then + // there is at least one directory. if (keyPath.length > 1) { - setParentObjectIDForDirectory(keyInfo, keyPath); - } else { - setParentObjectIDForBucket(keyInfo); - } - } - - /** - * Set the parent object ID for a directory. 
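- * The parent key is derived by joining every path element except the
- * last and appending a trailing slash, then looking that key up in the
- * KeyTable.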
- * @param keyInfo - * @param keyPath - * @throws IOException - */ - private void setParentObjectIDForDirectory(OmKeyInfo keyInfo, - String[] keyPath) - throws IOException { - String[] dirs = Arrays.copyOf(keyPath, keyPath.length - 1); - String parentKeyName = String.join(OM_KEY_PREFIX, dirs); - parentKeyName += OM_KEY_PREFIX; - String fullParentKeyName = - getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), - keyInfo.getBucketName(), parentKeyName); - OmKeyInfo parentKeyInfo = getReconOMMetadataManager() - .getKeyTable(BUCKET_LAYOUT) - .getSkipCache(fullParentKeyName); - - if (parentKeyInfo != null) { - keyInfo.setParentObjectID(parentKeyInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } - } - - /** - * Set the parent object ID for a bucket. - * @param keyInfo - * @throws IOException - */ - private void setParentObjectIDForBucket(OmKeyInfo keyInfo) - throws IOException { - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); - - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + String[] dirs = Arrays.copyOf(keyPath, keyPath.length - 1); + String parentKeyName = String.join(OM_KEY_PREFIX, dirs); + parentKeyName += OM_KEY_PREFIX; + String fullParentKeyName = + getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), parentKeyName); + OmKeyInfo parentKeyInfo = getReconOMMetadataManager() + .getKeyTable(BUCKET_LAYOUT) + .getSkipCache(fullParentKeyName); + + if (parentKeyInfo != null) { + keyInfo.setParentObjectID(parentKeyInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); + } } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); + } } } - } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java index d18ea0212787..5db3589305f6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -90,8 +90,7 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { OmBucketInfo omBucketInfo = omMetadataManager .getBucketTable().getSkipCache(bucketDBKey); - if (!omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (omBucketInfo.getBucketLayout() != BucketLayout.OBJECT_STORE) { continue; } From 4c28bc9e7cc7374e79108b1b553fbb4578c0ebe2 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 29 Oct 2023 19:42:22 +0530 Subject: [PATCH 05/31] Refactored NSSummaryTaskWithOBS and removed unnecessary methods --- .../recon/tasks/NSSummaryTaskWithOBS.java | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java index 5db3589305f6..ff852caea30c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -36,7 +35,6 @@ import java.util.HashMap; import java.util.Map; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** * Class for handling OBS specific tasks. @@ -48,20 +46,13 @@ public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler { private static final Logger LOG = LoggerFactory.getLogger(NSSummaryTaskWithOBS.class); - private boolean enableFileSystemPaths; - public NSSummaryTaskWithOBS(ReconNamespaceSummaryManager - reconNamespaceSummaryManager, - ReconOMMetadataManager - reconOMMetadataManager, - OzoneConfiguration - ozoneConfiguration) { + public NSSummaryTaskWithOBS( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager reconOMMetadataManager, + OzoneConfiguration ozoneConfiguration) { super(reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); - // true if FileSystemPaths enabled - enableFileSystemPaths = ozoneConfiguration - .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, - OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); } @@ -80,7 +71,7 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { OmKeyInfo keyInfo = kv.getValue(); // KeyTable entries belong to both Legacy and OBS buckets. - // Check bucket layout and if it's Legacy-FS + // Check bucket layout and if it's anything other than OBS, // continue to the next iteration. String volumeName = keyInfo.getVolumeName(); String bucketName = keyInfo.getBucketName(); @@ -118,38 +109,27 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { /** * KeyTable entries don't have the parentId set. - * In order to reuse the existing FSO methods that rely on + * In order to reuse the existing methods that rely on * the parentId, we have to set it explicitly. + * Note: For an OBS key, the parentId will always correspond to the ID of the + * OBS bucket in which it is located. 
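The note above is the invariant that keeps OBS summaries cheap: there is no directory chain to walk, so resolving a key's parent is a single bucket-table lookup. A sketch of that resolution under the same assumption, with a plain Map standing in for the Recon bucket table (helper names are hypothetical):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public final class ObsParentSketch {
  // For OBS, the parent of any key is the bucket itself, so the lookup
  // key is just "/volume/bucket" and the result is the bucket's objectID.
  static long resolveParentObjectId(Map<String, Long> bucketTable,
                                    String volume, String bucket)
      throws IOException {
    Long bucketObjectId = bucketTable.get("/" + volume + "/" + bucket);
    if (bucketObjectId == null) {
      throw new IOException("Bucket " + bucket + " not found for key");
    }
    return bucketObjectId;
  }

  public static void main(String[] args) throws IOException {
    Map<String, Long> bucketTable = new HashMap<>();
    bucketTable.put("/vol/bucket1", 1L);
    System.out.println(resolveParentObjectId(bucketTable, "vol", "bucket1"));
  }
}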
* * @param keyInfo * @throws IOException */ - private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { - System.out.println("#### INSIDE NSSummaryTaskWithOBS #### "); - // keyPath: [key1-legacy] - // OM_KEY_PREFIX: / - System.out.println("keyName: " + keyInfo.getKeyName()); - System.out.println("OM_KEY_PREFIX: " + OM_KEY_PREFIX); - + private void setKeyParentID(OmKeyInfo keyInfo) + throws IOException { String bucketKey = getReconOMMetadataManager() .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - - // bucketKey: /s3v/legacy-bucket - System.out.println("bucketKey: " + bucketKey); - OmBucketInfo parentBucketInfo = getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); - System.out.println("parentBucketInfo: " + parentBucketInfo); - if (parentBucketInfo != null) { keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); } else { throw new IOException("ParentKeyInfo for " + "NSSummaryTaskWithOBS is null"); } - System.out.println("#### GOING OUTSIDE NSSummaryTaskWithOBS #### "); - } } \ No newline at end of file From fd817521e5bff1064dba2e5882cbf7707244f91e Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 29 Oct 2023 22:40:37 +0530 Subject: [PATCH 06/31] Added process() method to NSSummaryTaskWithOBS --- .../ozone/recon/tasks/NSSummaryTask.java | 5 +- .../recon/tasks/NSSummaryTaskWithOBS.java | 96 +++++++++++++++++++ 2 files changed, 99 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index d4b057585e0f..dbdf27cbc98a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -101,12 +101,13 @@ public Pair process(OMUpdateEventBatch events) { boolean success; success = nsSummaryTaskWithFSO.processWithFSO(events); if (success) { - success = nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + + success = nsSummaryTaskWithLegacy.processWithLegacy(events); } else { LOG.error("processWithFSO failed."); } if (success) { - success = nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + success = nsSummaryTaskWithOBS.processWithOBS(events); } else { LOG.error("processWithFSO failed."); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java index ff852caea30c..1636be20dac7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -33,8 +34,11 @@ import java.io.IOException; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + /** * Class for handling
OBS specific tasks. @@ -107,6 +111,98 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { return true; } + public boolean processWithOBS(OMUpdateEventBatch events) { + Iterator eventIterator = events.getIterator(); + Map nsSummaryMap = new HashMap<>(); + + while (eventIterator.hasNext()) { + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); + OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); + + // We only process updates on OM's KeyTable + String table = omdbUpdateEvent.getTable(); + boolean updateOnKeyTable = table.equals(KEY_TABLE); + if (!updateOnKeyTable) { + continue; + } + + String updatedKey = omdbUpdateEvent.getKey(); + + try { + OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; + Object value = keyTableUpdateEvent.getValue(); + Object oldValue = keyTableUpdateEvent.getOldValue(); + if (!(value instanceof OmKeyInfo)) { + LOG.warn("Unexpected value type {} for key {}. Skipping processing.", + value == null ? "null" : value.getClass().getName(), updatedKey); + continue; + } + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; + OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; + + // KeyTable entries belong to both OBS and Legacy buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = updatedKeyInfo.getVolumeName(); + String bucketName = updatedKeyInfo.getBucketName(); + String bucketDBKey = + getReconOMMetadataManager().getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = getReconOMMetadataManager().getBucketTable() + .getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BucketLayout.OBJECT_STORE) { + continue; + } + + setKeyParentID(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case UPDATE: + if (oldKeyInfo != null) { + // delete first, then put + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKey); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + default: + LOG.debug("Skipping DB update event: {}", action); + } + + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } catch (IOException ioEx) { + LOG.error("Unable to process Namespace Summary data in Recon DB. ", + ioEx); + return false; + } + } + + // Flush and commit left-out entries at the end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + + LOG.info("Completed a process run of NSSummaryTaskWithOBS"); + return true; + } +
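The switch above leans on one bookkeeping rule: an UPDATE is handled as a DELETE of the old key followed by a PUT of the new one, so a resized key is never double-counted. A toy model of that rule, assuming a summary that tracks only key count and total size (names are illustrative, not Recon's types):

import java.util.HashMap;
import java.util.Map;

public final class UpdateEventSketch {
  static final class Summary {
    int numOfFiles;
    long sizeOfFiles;
  }

  static void put(Map<Long, Summary> map, long parentId, long size) {
    Summary s = map.computeIfAbsent(parentId, id -> new Summary());
    s.numOfFiles++;
    s.sizeOfFiles += size;
  }

  static void delete(Map<Long, Summary> map, long parentId, long size) {
    Summary s = map.get(parentId);
    if (s != null) {
      s.numOfFiles--;
      s.sizeOfFiles -= size;
    }
  }

  public static void main(String[] args) {
    Map<Long, Summary> map = new HashMap<>();
    long bucketId = 1L;
    put(map, bucketId, 1025L);    // PUT key2
    delete(map, bucketId, 1025L); // UPDATE: remove the old size first...
    put(map, bucketId, 1125L);    // ...then add the resized key
    System.out.println(map.get(bucketId).sizeOfFiles); // 1125
  }
}

+ /** * KeyTable entries don't have the parentId set.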
* In order to reuse the existing methods that rely on From 606044ab1410eb142444de011228138d08ed5333 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 29 Oct 2023 22:46:50 +0530 Subject: [PATCH 07/31] Removed unnecessary debugging code --- .../ozone/recon/api/OMDBInsightEndpoint.java | 154 +----------------- .../ozone/recon/tasks/NSSummaryTask.java | 4 +- .../tasks/NSSummaryTaskDbEventHandler.java | 27 --- 3 files changed, 5 insertions(+), 180 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index ec93a422ee97..37d6ae42b5c7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -24,10 +24,8 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; @@ -50,11 +48,11 @@ import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; @@ -161,149 +159,6 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, * } */ - public void omTableIterator() { - Map directoryMap = readDirectoryTable(); - Map fileMap = readFileTable(); - Map keyMap = readKeyTable(); - Map containerMap = readContainerTable(); - Map nSSummaryMap = readNSSummaryTable(); - - String outputPath = "output/"; - String directoryPath = outputPath + "directoryTable.txt"; - String filePath = outputPath + "fileTable.txt"; - String keyPath = outputPath + "keyTable.txt"; - String containerPath = outputPath + "containerTable.txt"; - String nSSummaryPath = outputPath + "nSSummaryTable.txt"; - - createOutputFolder(outputPath); - - writeTableToFile(directoryPath, directoryMap, (key, value) -> - key + " | " + value.getObjectID() + " | " + value.getParentObjectID()); - - writeTableToFile(filePath, fileMap, (key, value) -> key); - - writeTableToFile(keyPath, keyMap, (key, value) -> key + " | " + value.getKeyName()); - - writeTableToFile(containerPath, containerMap, (key, value) -> - key.getKeyPrefix() + " | " + key.getContainerId()); - -// writeTableToFile(nSSummaryPath, nSSummaryMap, (key, value) -> key + " | " + value.getDirName()); - } - - private void createOutputFolder(String outputPath) { - File outputFolder = new File(outputPath); - if (!outputFolder.exists()) { - boolean success = outputFolder.mkdirs(); - if (!success) { - System.err.println("Failed to create output folder."); - return; - } - } - } - - - private void writeTableToFile(String filePath, Map table, 
TableEntryFormatter formatter) { - File file = new File(filePath); - try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) { - for (Map.Entry entry : table.entrySet()) { - String key = entry.getKey().toString(); - String value = formatter.format(entry.getKey(), entry.getValue()); - writer.write(key + " | " + value + "\n"); - } - } catch (IOException e) { - e.printStackTrace(); - } - } - - private interface TableEntryFormatter { - String format(K key, V value); - } - - private Map readNSSummaryTable() { - Map nSSummaryMap = new LinkedHashMap<>(); - try (TableIterator> iterator = - reconNamespaceSummaryManager.getNSSummaryTable().iterator()) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String key = kv.getKey().toString(); - NSSummary value = kv.getValue(); - nSSummaryMap.put(key, value); - System.out.println("key: " + key + " childDir: " + value.getChildDir() + " " + value.getDirName()); - } - } catch (IOException e) { - e.printStackTrace(); - } - return nSSummaryMap; - } - - private Map readDirectoryTable() { - Map directoryMap = new LinkedHashMap<>(); - try (TableIterator> iterator = - omMetadataManager.getDirectoryTable().iterator()) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String key = kv.getKey(); - OmDirectoryInfo value = kv.getValue(); - directoryMap.put(key, value); - System.out.println("key: " + key + " value: " + value.getName() + " objectId : " + value.getObjectID()); - } - } catch (IOException e) { - e.printStackTrace(); - } - return directoryMap; - } - - private Map readFileTable() { - Map fileMap = new LinkedHashMap<>(); - try (TableIterator> iterator = - omMetadataManager.getFileTable().iterator()) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String key = kv.getKey(); - OmKeyInfo value = kv.getValue(); - fileMap.put(key, value); - System.out.println("key: " + key + " value: " + value.getKeyName()); - } - } catch (IOException e) { - e.printStackTrace(); - } - return fileMap; - } - - private Map readKeyTable() { - Map keyMap = new LinkedHashMap<>(); - try (TableIterator> iterator = - omMetadataManager.getKeyTable(BucketLayout.LEGACY).iterator()) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String key = kv.getKey(); - OmKeyInfo value = kv.getValue(); - keyMap.put(key, value); - System.out.println("key: " + key + " value: " + value.getKeyName()); - } - } catch (IOException e) { - e.printStackTrace(); - } - return keyMap; - } - - private Map readContainerTable() { - Map containerMap = new LinkedHashMap<>(); - try (TableIterator> containerIterator = - reconContainerMetadataManager.getContainerTableIterator()) { - while (containerIterator.hasNext()) { - Table.KeyValue kv = containerIterator.next(); - ContainerKeyPrefix key = kv.getKey(); - Integer value = kv.getValue(); - containerMap.put(key, value); - System.out.println("key: " + key + " value: " + value); - } - } catch (IOException e) { - e.printStackTrace(); - } - return containerMap; - } - @GET @Path("/open") public Response getOpenKeyInfo( @@ -317,7 +172,6 @@ public Response getOpenKeyInfo( @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) boolean includeNonFso) { - omTableIterator(); KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse(); List nonFSOKeyInfoList = openKeyInsightInfo.getNonFSOKeyInfoList(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index dbdf27cbc98a..baa170a3681d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -101,7 +101,6 @@ public Pair process(OMUpdateEventBatch events) { boolean success; success = nsSummaryTaskWithFSO.processWithFSO(events); if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); } else { LOG.error("processWithFSO failed."); @@ -109,9 +108,8 @@ public Pair process(OMUpdateEventBatch events) { if (success) { success = nsSummaryTaskWithOBS.processWithOBS(events); } else { - LOG.error("processWithFSO failed."); + LOG.error("processWithOBS failed."); } - return new ImmutablePair<>(getTaskName(), success); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java index 6dffc33723a6..f00d83e64a52 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java @@ -113,33 +113,6 @@ protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map nsSummaryMap) throws IOException { - long parentObjectId = keyInfo.getParentObjectID(); - // Try to get the NSSummary from our local map that maps NSSummaries to IDs - NSSummary nsSummary = nsSummaryMap.get(parentObjectId); - if (nsSummary == null) { - // If we don't have it in this batch we try to get it from the DB - nsSummary = reconNamespaceSummaryManager.getNSSummary(parentObjectId); - } - if (nsSummary == null) { - // If we don't have it locally and in the DB we create a new instance - // as this is a new ID - nsSummary = new NSSummary(); - } - int numOfFile = nsSummary.getNumOfFiles(); - long sizeOfFile = nsSummary.getSizeOfFiles(); - int[] fileBucket = nsSummary.getFileSizeBucket(); - nsSummary.setNumOfFiles(numOfFile + 1); - long dataSize = keyInfo.getDataSize(); - nsSummary.setSizeOfFiles(sizeOfFile + dataSize); - int binIndex = ReconUtils.getFileSizeBinIndex(dataSize); - ++fileBucket[binIndex]; - nsSummary.setFileSizeBucket(fileBucket); - nsSummary.addChildDir(keyInfo.getObjectID()); - nsSummaryMap.put(parentObjectId, nsSummary); - } - protected void handlePutDirEvent(OmDirectoryInfo directoryInfo, Map nsSummaryMap) throws IOException { From 4fe5ef833cf51d9cbdb72b87080cf71d4f3da66b Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 30 Oct 2023 01:39:48 +0530 Subject: [PATCH 08/31] Added Unit test for reprocess() for NSSummaryTaskWithOBS --- .../tasks/TestNSSummaryTaskWithLegacy.java | 2 +- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 348 ++++++++++++++++++ 2 files changed, 349 insertions(+), 1 deletion(-) create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java index 763b4b184702..1e32db78da34 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java 
@@ -739,6 +739,6 @@ private static void initializeNewOmMetadataManager( } private static BucketLayout getBucketLayout() { - return BucketLayout.OBJECT_STORE; + return BucketLayout.LEGACY; } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..b360ee1fc3cc --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -0,0 +1,348 @@ +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.NSSummaryEndpoint; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.Assert; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.mockito.Mockito.mock; + +public class TestNSSummaryTaskWithOBS { + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static OMMetadataManager omMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithOBS nSSummaryTaskWithOBS; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private 
static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + + private static Set bucketOneAns = new HashSet<>(); + private static Set bucketTwoAns = new HashSet<>(); + private static Set dirOneAns = new HashSet<>(); + + private TestNSSummaryTaskWithOBS() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProvider(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + Assert.assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, omConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithOBS reprocess. + */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. + NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + Assert.assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. 
+ reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + Assert.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + NSSummaryEndpoint nsSummaryEndpoint = new NSSummaryEndpoint( + reconNamespaceSummaryManager, reconOMMetadataManager, mock( + OzoneStorageContainerManager.class)); + + Response resp = nsSummaryEndpoint.getDiskUsage("/vol/bucket2",true,false); + DUResponse duDirReponse = (DUResponse) resp.getEntity(); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + Assert.assertNotNull(nsSummaryForBucket1); + Assert.assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + Assert.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + Assert.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + Assert.assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + Assert.assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + Assert.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + Assert.assertEquals(1, fileDistBucket1[index]); + } else { + Assert.assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + Assert.assertEquals(1, fileDistBucket2[index]); + } else { + Assert.assertEquals(0, fileDistBucket2[index]); + } + } + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + @Test + public void testReprocessBucketDirs() { + // None of the buckets have any child dirs because OBS is flat namespace. + Set childDirBucketOne = nsSummaryForBucket1.getChildDir(); + Set childDirBucketTwo = nsSummaryForBucket2.getChildDir(); + Assert.assertEquals(0, childDirBucketOne.size()); + Assert.assertEquals(0, childDirBucketTwo.size()); + } + + } + + /** + * Populate OMDB with the following configs. 
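The expectedIndexes arrays above follow from how Recon bins file sizes. Assuming the first bin holds sizes up to 1 KB and each later bin doubles the upper bound, capped at the final bin (which is how 500 B lands in bin 0, 1025 B in bin 1, 2050 B in bin 2, and the near-maximum key in bin 40), a sketch of that mapping:

public final class FileSizeBinSketch {
  // Hypothetical re-derivation of the bin index: bin 0 holds sizes
  // <= 1 KB, each later bin doubles the bound, capped at the last bin.
  static int binIndex(long size, int numBins) {
    long upperBound = 1024L;
    for (int bin = 0; bin < numBins - 1; bin++) {
      if (size <= upperBound) {
        return bin;
      }
      upperBound *= 2;
    }
    return numBins - 1; // everything larger falls into the final bin
  }

  public static void main(String[] args) {
    int bins = 41;
    System.out.println(binIndex(500L, bins));  // 0
    System.out.println(binIndex(1025L, bins)); // 1
    System.out.println(binIndex(2050L, bins)); // 2
    System.out.println(binIndex(100L, bins));  // 0
  }
}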
+ * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } +} From 17df34d6b4b76eccc6e586efce7b4b485c067424 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 30 Oct 2023 11:36:15 +0530 Subject: [PATCH 09/31] Added UT's for the process() method in NSSummaryWithOBS --- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 225 +++++++++++++++++- 1 file changed, 215 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index b360ee1fc3cc..ff76445342c7 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -56,6 +56,8 @@ public class TestNSSummaryTaskWithOBS { private static final String KEY_THREE = "dir1/dir2/key3"; private static final String KEY_FOUR = "key4///////////"; private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "key7"; private static final String TEST_USER = "TestUser"; @@ -68,6 +70,8 @@ public class TestNSSummaryTaskWithOBS { private static final long KEY_FOUR_OBJECT_ID = 6L; private static final long KEY_THREE_OBJECT_ID = 8L; private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; private static final long KEY_ONE_SIZE = 500L; @@ -77,6 +81,8 @@ public class TestNSSummaryTaskWithOBS { ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; private static final long KEY_FOUR_SIZE = 2050L; private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; private static Set bucketOneAns = new HashSet<>(); private static Set bucketTwoAns = new HashSet<>(); @@ -203,16 +209,6 @@ public void testReprocessFileBucketSize() { } } - // Helper method to check if an array contains a specific value - private boolean contains(int[] arr, int value) { - for (int num : arr) { - if (num == value) { - return true; - } - } - return false; - } - @Test public void testReprocessBucketDirs() { // None of the buckets have any child dirs because OBS is flat namespace. @@ -224,6 +220,173 @@ public void testReprocessBucketDirs() { } + /** + * Nested class for testing NSSummaryTaskWithLegacy process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + nSSummaryTaskWithOBS.processWithOBS(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + Assert.assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + Assert.assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. + String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch( + new ArrayList() {{ + add(keyEvent1); + add(keyEvent2); + add(keyEvent3); + add(keyEvent4); + }}); + + return omUpdateEventBatch; + } + + @Test + public void testProcessForCount() throws IOException { + Assert.assertNotNull(nsSummaryForBucket1); + Assert.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + Assert.assertNotNull(nsSummaryForBucket2); + Assert.assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + Assert.assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + Assert.assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + Assert.assertNotNull(nsSummaryForBucket1); + Assert.assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + Assert.assertNotNull(nsSummaryForBucket2); + Assert.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + Assert.assertEquals(1, fileDistBucket1[index]); + } else { + Assert.assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + Assert.assertEquals(1, fileDistBucket2[index]); + } else { + 
Assert.assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + /** * Populate OMDB with the following configs. * vol @@ -342,6 +505,48 @@ private static void initializeNewOmMetadataManager( omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); } + /** + * Build a key info for put/update action. + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + private static BucketLayout getBucketLayout() { return BucketLayout.OBJECT_STORE; } From da7f4e1925a4ab429e22f3fbae1035239205cf1a Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 7 Dec 2023 12:30:14 +0530 Subject: [PATCH 10/31] Fixed first set of review comments --- .../recon/api/handlers/EntityHandler.java | 6 ------ .../recon/api/handlers/FSOBucketHandler.java | 2 +- .../api/handlers/LegacyBucketHandler.java | 2 +- .../recon/api/handlers/OBSBucketHandler.java | 18 +++++++++++------- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 94d6f646f101..8888bf32d22d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -32,8 +32,6 @@ import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; @@ -48,10 +46,6 @@ */ public abstract class EntityHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - EntityHandler.class); - private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconOMMetadataManager omMetadataManager; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index 26cda6442d4e..8a1c5babe75e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -42,7 +42,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling FSO buckets. + * Class for handling FSO buckets NameSpaceSummaries. 
*/ public class FSOBucketHandler extends BucketHandler { private static final Logger LOG = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 3dd1ddbdabb9..09f1c5bc7454 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -41,7 +41,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling Legacy buckets. + * Class for handling Legacy buckets NameSpaceSummaries. */ public class LegacyBucketHandler extends BucketHandler { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index a6553da11a7b..b4cd1ad72d0d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -30,8 +30,6 @@ import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; @@ -39,13 +37,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling Legacy buckets. + * Class for handling OBS buckets NameSpaceSummaries. */ public class OBSBucketHandler extends BucketHandler { - private static final Logger LOG = LoggerFactory.getLogger( - OBSBucketHandler.class); - private final String vol; private final String bucket; private final OmBucketInfo omBucketInfo; @@ -69,6 +64,7 @@ public OBSBucketHandler( * @return KEY, or UNKNOWN * @throws IOException */ + @Override public EntityType determineKeyPath(String keyName) throws IOException { String key = OM_KEY_PREFIX + vol + @@ -104,6 +100,7 @@ public EntityType determineKeyPath(String keyName) * @return the total DU of all direct keys * @throws IOException IOE */ + @Override public long handleDirectKeys(long parentId, boolean withReplica, boolean listFile, List duData, @@ -168,6 +165,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, * * @return null */ + @Override public long calculateDUUnderObject(long parentId) throws IOException { return Long.parseLong(null); @@ -178,6 +176,7 @@ public long calculateDUUnderObject(long parentId) * * @return null */ + @Override public long getDirObjectId(String[] names) throws IOException { return Long.parseLong(null); } @@ -187,11 +186,16 @@ public long getDirObjectId(String[] names) throws IOException { * * @return null */ + @Override public long getDirObjectId(String[] names, int cutoff) throws IOException { return Long.parseLong(null); } - + /** + * Returns the keyInfo object from the KEY table. 
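As the javadoc notes, getKeyInfo below can reconstruct the flat OBS database key straight from the parsed request path, because no directory IDs are involved. A sketch of that join, with a plain Map standing in for the key table (names hypothetical):

import java.util.HashMap;
import java.util.Map;

public final class ObsKeyLookupSketch {
  private static final String OM_KEY_PREFIX = "/";

  // names = {volume, bucket, key...}: the OBS db key is just the
  // request path re-joined with "/" and a leading slash.
  static String toOzoneKey(String[] names) {
    return OM_KEY_PREFIX + String.join(OM_KEY_PREFIX, names);
  }

  public static void main(String[] args) {
    Map<String, String> keyTable = new HashMap<>();
    keyTable.put("/vol/bucket1/key1", "keyInfo-for-key1");
    String[] names = {"vol", "bucket1", "key1"};
    System.out.println(keyTable.get(toOzoneKey(names)));
  }
}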
+ * @return OmKeyInfo + */ + @Override public OmKeyInfo getKeyInfo(String[] names) throws IOException { String ozoneKey = OM_KEY_PREFIX; ozoneKey += String.join(OM_KEY_PREFIX, names); From 37c04385967be500b2e37d34b760d526cedb1e81 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 11 Dec 2023 12:33:54 +0530 Subject: [PATCH 11/31] Took care of the review comments and also fixed the failing UT's --- .../ozone/recon/tasks/NSSummaryTask.java | 35 ++++++++++++------- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 3 ++ qodana.yaml | 31 ++++++++++++++++ 3 files changed, 56 insertions(+), 13 deletions(-) create mode 100644 qodana.yaml diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index baa170a3681d..49dc604d123b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -98,16 +98,16 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { - boolean success; - success = nsSummaryTaskWithFSO.processWithFSO(events); - if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); - } else { + boolean success = nsSummaryTaskWithFSO.processWithFSO(events); + if (!success) { LOG.error("processWithFSO failed."); } - if (success) { - success = nsSummaryTaskWithOBS.processWithOBS(events); - } else { + success = nsSummaryTaskWithLegacy.processWithLegacy(events); + if (!success) { + LOG.error("processWithLegacy failed."); + } + success = nsSummaryTaskWithOBS.processWithOBS(events); + if (!success) { LOG.error("processWithOBS failed."); } return new ImmutablePair<>(getTaskName(), success); @@ -115,8 +115,11 @@ public Pair process(OMUpdateEventBatch events) { @Override public Pair reprocess(OMMetadataManager omMetadataManager) { + // Initialize a list of tasks to run in parallel Collection> tasks = new ArrayList<>(); + long startTime = System.nanoTime(); // Record start time + try { // reinit Recon RocksDB's namespace CF. reconNamespaceSummaryManager.clearNSSummaryTable(); @@ -135,7 +138,7 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { List> results; ExecutorService executorService = Executors - .newFixedThreadPool(2, + .newFixedThreadPool(3, new ThreadFactoryBuilder().setNameFormat("NSSummaryTask - %d") .build()); try { @@ -146,17 +149,23 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } } catch (InterruptedException ex) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex); return new ImmutablePair<>(getTaskName(), false); } catch (ExecutionException ex2) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. 
", ex2); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2); return new ImmutablePair<>(getTaskName(), false); } finally { executorService.shutdown(); + + long endTime = System.nanoTime(); // Record end time + long durationInMillis = (endTime - startTime) / 1_000_000; // Convert to milliseconds + + // Log performance metrics + LOG.info("Task execution time: {} milliseconds", durationInMillis); } + return new ImmutablePair<>(getTaskName(), true); } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index ff76445342c7..1ac3cbcc34b2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -236,6 +236,8 @@ public class TestProcess { @BeforeEach public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); nSSummaryTaskWithOBS.processWithOBS(processEventBatch()); @@ -368,6 +370,7 @@ public void testProcessFileBucketSize() { int[] expectedIndexes1 = {1, 3, 40}; for (int index = 0; index < fileDistBucket1.length; index++) { if (contains(expectedIndexes1, index)) { + System.out.println("######## index: " + index + " value: " + fileDistBucket1[index]+"########"); Assert.assertEquals(1, fileDistBucket1[index]); } else { Assert.assertEquals(0, fileDistBucket1[index]); diff --git a/qodana.yaml b/qodana.yaml new file mode 100644 index 000000000000..4e2698583c96 --- /dev/null +++ b/qodana.yaml @@ -0,0 +1,31 @@ +#-------------------------------------------------------------------------------# +# Qodana analysis is configured by qodana.yaml file # +# https://www.jetbrains.com/help/qodana/qodana-yaml.html # +#-------------------------------------------------------------------------------# +version: "1.0" + +#Specify inspection profile for code analysis +profile: + name: qodana.starter + +#Enable inspections +#include: +# - name: + +#Disable inspections +#exclude: +# - name: +# paths: +# - + +projectJDK: 8 #(Applied in CI/CD pipeline) + +#Execute shell command before Qodana execution (Applied in CI/CD pipeline) +#bootstrap: sh ./prepare-qodana.sh + +#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline) +#plugins: +# - id: #(plugin id can be found at https://plugins.jetbrains.com) + +#Specify Qodana linter for analysis (Applied in CI/CD pipeline) +linter: jetbrains/qodana-jvm:latest From 2655425677f5128201ab052a9923c77bc2c23604 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 11 Dec 2023 19:46:02 +0530 Subject: [PATCH 12/31] Migrated junit for TestNSSummaryTaskWithOBS to jUnit5 --- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index 1ac3cbcc34b2..0691871c9d1d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -20,7 +20,7 @@ import 
org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.junit.Assert; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; @@ -111,7 +111,7 @@ public static void setUp(@TempDir File tmpDir) throws Exception { NSSummary nonExistentSummary = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); - Assert.assertNull(nonExistentSummary); + Assertions.assertNull(nonExistentSummary); populateOMDB(); @@ -140,13 +140,13 @@ public void setUp() throws IOException { reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); // Verify commit - Assert.assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + Assertions.assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); // reinit Recon RocksDB's namespace CF. reconNamespaceSummaryManager.clearNSSummaryTable(); nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); - Assert.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + Assertions.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); NSSummaryEndpoint nsSummaryEndpoint = new NSSummaryEndpoint( reconNamespaceSummaryManager, reconOMMetadataManager, mock( @@ -159,23 +159,23 @@ reconNamespaceSummaryManager, reconOMMetadataManager, mock( reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); nsSummaryForBucket2 = reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); - Assert.assertNotNull(nsSummaryForBucket1); - Assert.assertNotNull(nsSummaryForBucket2); + Assertions.assertNotNull(nsSummaryForBucket1); + Assertions.assertNotNull(nsSummaryForBucket2); } @Test public void testReprocessNSSummaryNull() throws IOException { - Assert.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + Assertions.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); } @Test public void testReprocessGetFiles() { - Assert.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); - Assert.assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + Assertions.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + Assertions.assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); - Assert.assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + Assertions.assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, nsSummaryForBucket1.getSizeOfFiles()); - Assert.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + Assertions.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, nsSummaryForBucket2.getSizeOfFiles()); } @@ -183,18 +183,18 @@ public void testReprocessGetFiles() { public void testReprocessFileBucketSize() { int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); - Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket1.length); - Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket2.length); // Check for 1's and 0's in fileDistBucket1 int[] expectedIndexes1 = {0, 1, 40}; for (int index = 0; index < fileDistBucket1.length; index++) { if (contains(expectedIndexes1, index)) { - Assert.assertEquals(1, fileDistBucket1[index]); + Assertions.assertEquals(1, fileDistBucket1[index]); } else { - Assert.assertEquals(0, 
fileDistBucket1[index]); + Assertions.assertEquals(0, fileDistBucket1[index]); } } @@ -202,9 +202,9 @@ public void testReprocessFileBucketSize() { int[] expectedIndexes2 = {0, 2}; for (int index = 0; index < fileDistBucket2.length; index++) { if (contains(expectedIndexes2, index)) { - Assert.assertEquals(1, fileDistBucket2[index]); + Assertions.assertEquals(1, fileDistBucket2[index]); } else { - Assert.assertEquals(0, fileDistBucket2[index]); + Assertions.assertEquals(0, fileDistBucket2[index]); } } } @@ -214,8 +214,8 @@ public void testReprocessBucketDirs() { // None of the buckets have any child dirs because OBS is flat namespace. Set childDirBucketOne = nsSummaryForBucket1.getChildDir(); Set childDirBucketTwo = nsSummaryForBucket2.getChildDir(); - Assert.assertEquals(0, childDirBucketOne.size()); - Assert.assertEquals(0, childDirBucketTwo.size()); + Assertions.assertEquals(0, childDirBucketOne.size()); + Assertions.assertEquals(0, childDirBucketTwo.size()); } } @@ -243,10 +243,10 @@ public void setUp() throws IOException { nsSummaryForBucket1 = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); - Assert.assertNotNull(nsSummaryForBucket1); + Assertions.assertNotNull(nsSummaryForBucket1); nsSummaryForBucket2 = reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); - Assert.assertNotNull(nsSummaryForBucket2); + Assertions.assertNotNull(nsSummaryForBucket2); } private OMUpdateEventBatch processEventBatch() throws IOException { @@ -334,25 +334,25 @@ private OMUpdateEventBatch processEventBatch() throws IOException { @Test public void testProcessForCount() throws IOException { - Assert.assertNotNull(nsSummaryForBucket1); - Assert.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); - Assert.assertNotNull(nsSummaryForBucket2); - Assert.assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + Assertions.assertNotNull(nsSummaryForBucket1); + Assertions.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + Assertions.assertNotNull(nsSummaryForBucket2); + Assertions.assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); - Assert.assertEquals(0, childDirBucket1.size()); + Assertions.assertEquals(0, childDirBucket1.size()); Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); - Assert.assertEquals(0, childDirBucket2.size()); + Assertions.assertEquals(0, childDirBucket2.size()); } @Test public void testProcessForSize() throws IOException { - Assert.assertNotNull(nsSummaryForBucket1); - Assert.assertEquals( + Assertions.assertNotNull(nsSummaryForBucket1); + Assertions.assertEquals( KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, nsSummaryForBucket1.getSizeOfFiles()); - Assert.assertNotNull(nsSummaryForBucket2); - Assert.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + Assertions.assertNotNull(nsSummaryForBucket2); + Assertions.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, nsSummaryForBucket2.getSizeOfFiles()); } @@ -361,9 +361,9 @@ public void testProcessForSize() throws IOException { public void testProcessFileBucketSize() { int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); - Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket1.length); - Assert.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket2.length); // Check for 1's and 0's in 
fileDistBucket1 @@ -371,9 +371,9 @@ public void testProcessFileBucketSize() { for (int index = 0; index < fileDistBucket1.length; index++) { if (contains(expectedIndexes1, index)) { System.out.println("######## index: " + index + " value: " + fileDistBucket1[index]+"########"); - Assert.assertEquals(1, fileDistBucket1[index]); + Assertions.assertEquals(1, fileDistBucket1[index]); } else { - Assert.assertEquals(0, fileDistBucket1[index]); + Assertions.assertEquals(0, fileDistBucket1[index]); } } @@ -381,9 +381,9 @@ public void testProcessFileBucketSize() { int[] expectedIndexes2 = {0, 2, 3}; for (int index = 0; index < fileDistBucket2.length; index++) { if (contains(expectedIndexes2, index)) { - Assert.assertEquals(1, fileDistBucket2[index]); + Assertions.assertEquals(1, fileDistBucket2[index]); } else { - Assert.assertEquals(0, fileDistBucket2[index]); + Assertions.assertEquals(0, fileDistBucket2[index]); } } } From f610de0900cc8e144d14fa0d96b814be9fcc32a6 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 11 Dec 2023 19:49:18 +0530 Subject: [PATCH 13/31] Removed unnecessary changes --- .../recon/api/handlers/EntityHandler.java | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 99917682a11f..d12c7b6545ac 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -112,46 +112,46 @@ public String[] getNames() { * @return the entity handler of client's request */ public static EntityHandler getEntityHandler( - ReconNamespaceSummaryManager reconNamespaceSummaryManager, - ReconOMMetadataManager omMetadataManager, - OzoneStorageContainerManager reconSCM, - String path) throws IOException { + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager, + OzoneStorageContainerManager reconSCM, + String path) throws IOException { BucketHandler bucketHandler; String normalizedPath = normalizePath(path); String[] names = parseRequestPath(normalizedPath); if (path.equals(OM_KEY_PREFIX)) { return EntityType.ROOT.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } if (names.length == 0) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } else if (names.length == 1) { // volume level check String volName = names[0]; if (!omMetadataManager.volumeExists(volName)) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } return EntityType.VOLUME.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } else if (names.length == 2) { // bucket level check String volName = names[0]; String bucketName = names[1]; bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, + omMetadataManager, reconSCM, + volName, bucketName); if (bucketHandler == null || !bucketHandler.bucketExists(volName, bucketName)) { return
EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } return EntityType.BUCKET.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); + omMetadataManager, reconSCM, bucketHandler, path); } else { // length > 3. check dir or key existence String volName = names[0]; String bucketName = names[1]; @@ -159,20 +159,20 @@ public static EntityHandler getEntityHandler( String keyName = BucketHandler.getKeyName(names); bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, + omMetadataManager, reconSCM, + volName, bucketName); // check if either volume or bucket doesn't exist if (bucketHandler == null || !omMetadataManager.volumeExists(volName) || !bucketHandler.bucketExists(volName, bucketName)) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); } return bucketHandler.determineKeyPath(keyName) .create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); + omMetadataManager, reconSCM, bucketHandler, path); } } From 9c53399c8637f520ba0fc21c5c69640c6c973e0a Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 29 Jan 2024 13:37:47 +0530 Subject: [PATCH 14/31] Made the latest review changes --- .../recon/api/handlers/OBSBucketHandler.java | 77 ++++++++++--------- .../ozone/recon/tasks/NSSummaryTask.java | 7 +- qodana.yaml | 31 -------- 3 files changed, 45 insertions(+), 70 deletions(-) delete mode 100644 qodana.yaml diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index b4cd1ad72d0d..65f74ea14710 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -65,23 +65,23 @@ public OBSBucketHandler( * @throws IOException */ @Override - public EntityType determineKeyPath(String keyName) - throws IOException { + public EntityType determineKeyPath(String keyName) throws IOException { String key = OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX + keyName; Table keyTable = getKeyTable(); - TableIterator> - iterator = keyTable.iterator(); - - iterator.seek(key); - if (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - String dbKey = kv.getKey(); - if (dbKey.equals(key)) { - return EntityType.KEY; + try ( + TableIterator> + iterator = keyTable.iterator()) { + iterator.seek(key); + if (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + if (dbKey.equals(key)) { + return EntityType.KEY; + } } } return EntityType.UNKNOWN; @@ -106,6 +106,13 @@ public long handleDirectKeys(long parentId, boolean withReplica, List duData, String normalizedPath) throws IOException { + NSSummary nsSummary = getReconNamespaceSummaryManager() + .getNSSummary(parentId); + // Handle the case of an empty bucket.
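// Context: Recon's NSSummary table is populated from key entries by the
// NSSummary task, so a bucket that holds no keys gets no entry at all; a
// null lookup here is therefore treated as an empty bucket with zero usage.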
+ if (nsSummary == null) { + return 0; + } + Table keyTable = getKeyTable(); long keyDataSizeWithReplica = 0L; @@ -118,13 +125,6 @@ public long handleDirectKeys(long parentId, boolean withReplica, bucket + OM_KEY_PREFIX; - NSSummary nsSummary = getReconNamespaceSummaryManager() - .getNSSummary(parentId); - // Handle the case of an empty bucket. - if (nsSummary == null) { - return 0; - } - iterator.seek(seekPrefix); while (iterator.hasNext()) { @@ -161,34 +161,39 @@ public long handleDirectKeys(long parentId, boolean withReplica, } /** - * Object stores do not support directories, hence return null. + * Object stores do not support directories. * - * @return null + * @return UnsupportedOperationException */ @Override public long calculateDUUnderObject(long parentId) throws IOException { - return Long.parseLong(null); + throw new UnsupportedOperationException( + "Object stores do not support directories."); } /** - * Object stores do not support directories, hence return null. + * Object stores do not support directories. * - * @return null + * @return UnsupportedOperationException */ @Override - public long getDirObjectId(String[] names) throws IOException { - return Long.parseLong(null); + public long getDirObjectId(String[] names) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); } /** - * Object stores do not support directories, hence return null. + * Object stores do not support directories. * - * @return null + * @return UnsupportedOperationException */ @Override - public long getDirObjectId(String[] names, int cutoff) throws IOException { - return Long.parseLong(null); + public long getDirObjectId(String[] names, int cutoff) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); } /** @@ -200,24 +205,22 @@ public OmKeyInfo getKeyInfo(String[] names) throws IOException { String ozoneKey = OM_KEY_PREFIX; ozoneKey += String.join(OM_KEY_PREFIX, names); - OmKeyInfo keyInfo = getKeyTable().getSkipCache(ozoneKey); - return keyInfo; + return getKeyTable().getSkipCache(ozoneKey); } /** - * Object stores do not support directories, hence return null. + * Object stores do not support directories. * - * @return null + * @return UnsupportedOperationException */ @Override public OmDirectoryInfo getDirInfo(String[] names) throws IOException { - return null; + throw new UnsupportedOperationException( + "Object stores do not support directories."); } public Table getKeyTable() { - Table keyTable = - getOmMetadataManager().getKeyTable(getBucketLayout()); - return keyTable; + return getOmMetadataManager().getKeyTable(getBucketLayout()); } public BucketLayout getBucketLayout() { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 760fc9cbc9f9..5c3395084464 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -38,6 +38,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; /** * Task to query data from OMDB and write into Recon RocksDB. 
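The hunk below replaces the hand-rolled division by 1_000_000 with TimeUnit.NANOSECONDS.toMillis. For reference, a minimal self-contained sketch of the same timing pattern; the TimingSketch class name and the Thread.sleep stand-in are illustrative assumptions, not part of the patch:

    import java.util.concurrent.TimeUnit;

    public final class TimingSketch {
      public static void main(String[] args) throws InterruptedException {
        long startTime = System.nanoTime();
        Thread.sleep(25); // stand-in for the actual reprocess work
        // TimeUnit names the nanosecond-to-millisecond conversion explicitly
        // instead of relying on the magic constant 1_000_000.
        long durationInMillis =
            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
        System.out.println(
            "Task execution time: " + durationInMillis + " milliseconds");
      }
    }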
@@ -159,8 +160,10 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } finally { executorService.shutdown(); - long endTime = System.nanoTime(); // Record end time - long durationInMillis = (endTime - startTime) / 1_000_000; // Convert to milliseconds + long endTime = System.nanoTime(); + // Convert to milliseconds + long durationInMillis = + TimeUnit.NANOSECONDS.toMillis(endTime - startTime); // Log performance metrics LOG.info("Task execution time: {} milliseconds", durationInMillis); diff --git a/qodana.yaml b/qodana.yaml deleted file mode 100644 index 4e2698583c96..000000000000 --- a/qodana.yaml +++ /dev/null @@ -1,31 +0,0 @@ -#-------------------------------------------------------------------------------# -# Qodana analysis is configured by qodana.yaml file # -# https://www.jetbrains.com/help/qodana/qodana-yaml.html # -#-------------------------------------------------------------------------------# -version: "1.0" - -#Specify inspection profile for code analysis -profile: - name: qodana.starter - -#Enable inspections -#include: -# - name: - -#Disable inspections -#exclude: -# - name: -# paths: -# - - -projectJDK: 8 #(Applied in CI/CD pipeline) - -#Execute shell command before Qodana execution (Applied in CI/CD pipeline) -#bootstrap: sh ./prepare-qodana.sh - -#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline) -#plugins: -# - id: #(plugin id can be found at https://plugins.jetbrains.com) - -#Specify Qodana linter for analysis (Applied in CI/CD pipeline) -linter: jetbrains/qodana-jvm:latest From c5d3b275b541013d1140f33f985b454ffae0ce6b Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 29 Jan 2024 13:55:07 +0530 Subject: [PATCH 15/31] Made review changes --- .../recon/api/handlers/OBSBucketHandler.java | 74 ++++++++++--------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index 65f74ea14710..9528ae1b8704 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -116,43 +116,45 @@ public long handleDirectKeys(long parentId, boolean withReplica, Table keyTable = getKeyTable(); long keyDataSizeWithReplica = 0L; - TableIterator> - iterator = keyTable.iterator(); - - String seekPrefix = OM_KEY_PREFIX + - vol + - OM_KEY_PREFIX + - bucket + - OM_KEY_PREFIX; + try ( + TableIterator> + iterator = keyTable.iterator()) { - iterator.seek(seekPrefix); + String seekPrefix = OM_KEY_PREFIX + + vol + + OM_KEY_PREFIX + + bucket + + OM_KEY_PREFIX; - while (iterator.hasNext()) { - // KeyName : OmKeyInfo-Object - Table.KeyValue kv = iterator.next(); - String dbKey = kv.getKey(); + iterator.seek(seekPrefix); - // Exit loop if the key doesn't match the seekPrefix. 
- if (!dbKey.startsWith(seekPrefix)) { - break; - } + while (iterator.hasNext()) { + // KeyName : OmKeyInfo-Object + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); - OmKeyInfo keyInfo = kv.getValue(); - if (keyInfo != null) { - DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); - String objectName = keyInfo.getKeyName(); - diskUsage.setSubpath(objectName); - diskUsage.setKey(true); - diskUsage.setSize(keyInfo.getDataSize()); - - if (withReplica) { - long keyDU = keyInfo.getReplicatedSize(); - keyDataSizeWithReplica += keyDU; - diskUsage.setSizeWithReplica(keyDU); + // Exit loop if the key doesn't match the seekPrefix. + if (!dbKey.startsWith(seekPrefix)) { + break; } - // List all the keys for the OBS bucket if requested. - if (listFile) { - duData.add(diskUsage); + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // List all the keys for the OBS bucket if requested. + if (listFile) { + duData.add(diskUsage); + } } } } @@ -163,7 +165,7 @@ public long handleDirectKeys(long parentId, boolean withReplica, /** * Object stores do not support directories. * - * @return UnsupportedOperationException + * @throws UnsupportedOperationException */ @Override public long calculateDUUnderObject(long parentId) @@ -175,7 +177,7 @@ public long calculateDUUnderObject(long parentId) /** * Object stores do not support directories. * - * @return UnsupportedOperationException + * @throws UnsupportedOperationException */ @Override public long getDirObjectId(String[] names) @@ -187,7 +189,7 @@ public long getDirObjectId(String[] names) /** * Object stores do not support directories. * - * @return UnsupportedOperationException + * @throws UnsupportedOperationException */ @Override public long getDirObjectId(String[] names, int cutoff) @@ -211,7 +213,7 @@ public OmKeyInfo getKeyInfo(String[] names) throws IOException { /** * Object stores do not support directories. 
* - * @return UnsupportedOperationException + * @throws UnsupportedOperationException */ @Override public OmDirectoryInfo getDirInfo(String[] names) throws IOException { From 68ab58f90be03cabd9ba51221d65751b5e7608be Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 30 Jan 2024 17:32:51 +0530 Subject: [PATCH 16/31] Fixed failing CI checks in fork --- .../recon/api/handlers/OBSBucketHandler.java | 2 +- .../recon/tasks/NSSummaryTaskWithOBS.java | 2 +- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 33 ++++++++++--------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index 9528ae1b8704..eedad9c717e3 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -229,4 +229,4 @@ public BucketLayout getBucketLayout() { return BucketLayout.OBJECT_STORE; } -} \ No newline at end of file +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java index 1636be20dac7..4585ae05f634 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -228,4 +228,4 @@ private void setKeyParentID(OmKeyInfo keyInfo) } } -} \ No newline at end of file +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index 0691871c9d1d..9be7e34c56c2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -15,7 +15,6 @@ import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.api.NSSummaryEndpoint; -import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -27,7 +26,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import javax.ws.rs.core.Response; import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -36,11 +34,15 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; import static org.mockito.Mockito.mock; -public class TestNSSummaryTaskWithOBS { +/** + * Unit test for 
NSSummaryTaskWithOBS. + */ +public final class TestNSSummaryTaskWithOBS { private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; private static OMMetadataManager omMetadataManager; private static ReconOMMetadataManager reconOMMetadataManager; @@ -152,9 +154,6 @@ public void setUp() throws IOException { reconNamespaceSummaryManager, reconOMMetadataManager, mock( OzoneStorageContainerManager.class)); - Response resp = nsSummaryEndpoint.getDiskUsage("/vol/bucket2",true,false); - DUResponse duDirReponse = (DUResponse) resp.getEntity(); - nsSummaryForBucket1 = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); nsSummaryForBucket2 = @@ -321,13 +320,15 @@ private OMUpdateEventBatch processEventBatch() throws IOException { .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) .build(); - OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch( - new ArrayList() {{ - add(keyEvent1); - add(keyEvent2); - add(keyEvent3); - add(keyEvent4); - }}); + OMUpdateEventBatch omUpdateEventBatch = + new OMUpdateEventBatch(new ArrayList() { + { + add(keyEvent1); + add(keyEvent2); + add(keyEvent3); + add(keyEvent4); + } + }); return omUpdateEventBatch; } @@ -370,7 +371,6 @@ public void testProcessFileBucketSize() { int[] expectedIndexes1 = {1, 3, 40}; for (int index = 0; index < fileDistBucket1.length; index++) { if (contains(expectedIndexes1, index)) { - System.out.println("######## index: " + index + " value: " + fileDistBucket1[index]+"########"); Assertions.assertEquals(1, fileDistBucket1[index]); } else { Assertions.assertEquals(0, fileDistBucket1[index]); @@ -462,6 +462,7 @@ private static void populateOMDB() throws IOException { /** * Create a new OM Metadata manager instance with one user, one vol, and two * buckets. + * * @throws IOException ioEx */ private static void initializeNewOmMetadataManager( From ddd1a3b660bb2c891e64abc02e553fee8677e0cb Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 30 Jan 2024 17:36:48 +0530 Subject: [PATCH 17/31] Added apache licence for class --- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index 9be7e34c56c2..2a6a992e88d6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.ozone.recon.tasks; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; From 76597a44505edcf42a277f8c7abf12904f23be01 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 31 Jan 2024 11:28:18 +0530 Subject: [PATCH 18/31] Fixed checkstyle issues for HDDS-7810 --- .../ozone/recon/tasks/TestNSSummaryTaskWithOBS.java | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index 2a6a992e88d6..85533657dc95 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -32,7 +31,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.ReconTestInjector; -import org.apache.hadoop.ozone.recon.api.NSSummaryEndpoint; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; @@ -47,7 +45,6 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; -import java.util.HashSet; import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -55,7 +52,6 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.mockito.Mockito.mock; /** * Unit test for NSSummaryTaskWithOBS. 
@@ -104,10 +100,6 @@ public final class TestNSSummaryTaskWithOBS { private static final long KEY_SIX_SIZE = 6000L; private static final long KEY_SEVEN_SIZE = 7000L; - private static Set bucketOneAns = new HashSet<>(); - private static Set bucketTwoAns = new HashSet<>(); - private static Set dirOneAns = new HashSet<>(); - private TestNSSummaryTaskWithOBS() { } @@ -168,10 +160,6 @@ public void setUp() throws IOException { nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); Assertions.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); - NSSummaryEndpoint nsSummaryEndpoint = new NSSummaryEndpoint( - reconNamespaceSummaryManager, reconOMMetadataManager, mock( - OzoneStorageContainerManager.class)); - nsSummaryForBucket1 = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); nsSummaryForBucket2 = From 5ab09d3764039fce20ad8598be824417fffaa994 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 31 Jan 2024 17:05:42 +0530 Subject: [PATCH 19/31] Potential fix for serialization error --- .../ozone/recon/tasks/TestNSSummaryTaskWithOBS.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index 85533657dc95..b24c07f92014 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -44,19 +44,19 @@ import java.io.File; import java.io.IOException; +import java.io.Serializable; import java.util.ArrayList; import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; /** * Unit test for NSSummaryTaskWithOBS.
*/ -public final class TestNSSummaryTaskWithOBS { +public final class TestNSSummaryTaskWithOBS implements Serializable { private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; private static OMMetadataManager omMetadataManager; private static ReconOMMetadataManager reconOMMetadataManager; @@ -107,7 +107,7 @@ private TestNSSummaryTaskWithOBS() { public static void setUp(@TempDir File tmpDir) throws Exception { initializeNewOmMetadataManager(new File(tmpDir, "om")); OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - getMockOzoneManagerServiceProvider(); + getMockOzoneManagerServiceProviderWithFSO(); reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, new File(tmpDir, "recon")); From ac2f0037d0a06b08b42fd13f1dfed3a182626b1d Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 31 Jan 2024 20:05:25 +0530 Subject: [PATCH 20/31] HDDS-7810 Fixed the findbugs problem --- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index b24c07f92014..f9d1b4957d24 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -46,6 +46,7 @@ import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; +import java.util.Arrays; import java.util.Set; @@ -326,17 +327,8 @@ private OMUpdateEventBatch processEventBatch() throws IOException { .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) .build(); - OMUpdateEventBatch omUpdateEventBatch = - new OMUpdateEventBatch(new ArrayList() { - { - add(keyEvent1); - add(keyEvent2); - add(keyEvent3); - add(keyEvent4); - } - }); - - return omUpdateEventBatch; + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); } @Test From 297be0c340bafaad578bd13cb103822d596646ce Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 31 Jan 2024 20:09:03 +0530 Subject: [PATCH 21/31] Fixed checkstyle issues --- .../hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index f9d1b4957d24..ae69b055febd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -45,7 +45,6 @@ import java.io.File; import java.io.IOException; import java.io.Serializable; -import java.util.ArrayList; import java.util.Arrays; import java.util.Set; From 01f817bf9ac04735c3e1d8b0f9a1ec9ca310bb03 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 7 Feb 2024 19:59:16 +0530 Subject: [PATCH 22/31] Fixed failing UT --- .../apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java index 6992c3100fb9..485804240d52 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java @@ -166,7 +166,7 @@ public void setUp() throws Exception { reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); assertNotNull(nsSummaryForBucket1); assertNotNull(nsSummaryForBucket2); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } @Test @@ -233,7 +233,7 @@ public void setUp() throws IOException { assertNotNull(nsSummaryForBucket2); nsSummaryForBucket3 = reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } private OMUpdateEventBatch processEventBatch() throws IOException { From 913662e0914ac924424a1453c9dd6e2db5e1e5d2 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 11 Feb 2024 20:36:19 +0530 Subject: [PATCH 23/31] Fixed failing UT's and also added tests for the NSSummaryEndpoint for OBS buckets --- .../recon/api/handlers/OBSBucketHandler.java | 50 +- .../api/TestNSSummaryEndpointWithOBS.java | 1110 +++++++++++++++++ 2 files changed, 1153 insertions(+), 7 deletions(-) create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java index eedad9c717e3..024eec989a10 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -90,7 +90,7 @@ public EntityType determineKeyPath(String keyName) throws IOException { /** * This method handles disk usage of direct keys. * - * @param parentId parent OBS bucket + * @param parentId The identifier for the parent bucket. * @param withReplica if withReplica is enabled, set sizeWithReplica * for each direct key's DU * @param listFile if listFile is enabled, append key DU as a children @@ -163,15 +163,51 @@ public long handleDirectKeys(long parentId, boolean withReplica, } /** - * Object stores do not support directories. + * Calculates the total disk usage (DU) for an Object Store Bucket (OBS) by + * summing the sizes of all keys contained within the bucket. + * Since OBS buckets operate on a flat hierarchy, this method iterates through + * all the keys in the bucket without the need to traverse directories. * - * @throws UnsupportedOperationException + * @param parentId The identifier for the parent bucket. + * @return The total disk usage of all keys within the specified OBS bucket. + * @throws IOException */ @Override - public long calculateDUUnderObject(long parentId) - throws IOException { - throw new UnsupportedOperationException( - "Object stores do not support directories."); + public long calculateDUUnderObject(long parentId) throws IOException { + // Initialize the total disk usage variable. + long totalDU = 0L; + + // Access the key table for the bucket. + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + // Construct the seek prefix to filter keys under this bucket. 
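// Context: OBS keys live in the key table flat, as
// "/volumeName/bucketName/keyName" (see determineKeyPath above), so seeking
// to the "/volume/bucket/" prefix positions the iterator at this bucket's
// first key, and the scan can stop at the first key that no longer starts
// with that prefix.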
+ String seekPrefix = + OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX; + iterator.seek(seekPrefix); + + // Iterate over keys in the bucket. + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String keyName = kv.getKey(); + + // Break the loop if the current key does not start with the seekPrefix. + if (!keyName.startsWith(seekPrefix)) { + break; + } + + // Sum the size of each key to the total disk usage. + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + totalDU += keyInfo.getDataSize(); + } + } + } + + // Return the total disk usage of all keys in the bucket. + return totalDU; } /** diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java new file mode 100644 index 000000000000..fef23ef7e871 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -0,0 +1,1110 @@ +package org.apache.hadoop.ozone.recon.api; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; +import org.apache.hadoop.ozone.recon.api.types.*; +import org.apache.hadoop.ozone.recon.common.CommonUtils; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; +import org.junit.jupiter.api.BeforeEach; +import 
org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import javax.ws.rs.core.Response; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test for NSSummary REST APIs with OBS. + * Testing is done on a simple object store model with a flat hierarchy: + * vol + * / \ + * bucket1 bucket2 + * | | + * file1 file2 + * + * This tests the Rest APIs for NSSummary in the context of OBS buckets, + * focusing on disk usage, quota usage, and file size distribution. + */ +public class TestNSSummaryEndpointWithOBS { + @TempDir + private Path temporaryFolder; + + private ReconOMMetadataManager reconOMMetadataManager; + private NSSummaryEndpoint nsSummaryEndpoint; + private OzoneConfiguration conf; + private CommonUtils commonUtils; + + private static final String TEST_PATH_UTILITY = + "/vol1/buck1/a/b/c/d/e/file1.txt"; + private static final String PARENT_DIR = "vol1/buck1/a/b/c/d/e"; + private static final String[] TEST_NAMES = + new String[]{"vol1", "buck1", "a", "b", "c", "d", "e", "file1.txt"}; + private static final String TEST_KEY_NAMES = "a/b/c/d/e/file1.txt"; + + // Object names + private static final String VOL = "vol"; + private static final String VOL_TWO = "vol2"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String BUCKET_THREE = "bucket3"; + private static final String BUCKET_FOUR = "bucket4"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "file2"; + private static final String KEY_THREE = "file3"; + private static final String KEY_FOUR = "file4"; + private static final String KEY_FIVE = "file5"; + private static final String KEY_SIX = "file6"; + private static final String KEY_SEVEN = "file7"; + private static final String KEY_EIGHT = "file8"; + private static final String KEY_NINE = "file9"; + private static final String KEY_TEN = "file10"; + private static final String KEY_ELEVEN = "file11"; + private static final String MULTI_BLOCK_KEY = "file7"; + private static final String MULTI_BLOCK_FILE = "file7"; + + private static final String FILE_ONE = "file1"; + private static final String FILE_TWO = "file2"; + private static final String FILE_THREE = "file3"; + private static final String FILE_FOUR = "file4"; + private static final String FILE_FIVE = "file5"; + private static final String FILE_SIX = "file6"; + private static final String FILE_SEVEN = "file7"; + private static final String FILE_EIGHT = "file8"; 
+ private static final String FILE_NINE = "file9"; + private static final String FILE_TEN = "file10"; + private static final String FILE_ELEVEN = "file11"; + // objects IDs + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; + private static final long KEY_SEVEN_OBJECT_ID = 13L; + private static final long VOL_TWO_OBJECT_ID = 14L; + private static final long BUCKET_THREE_OBJECT_ID = 15L; + private static final long BUCKET_FOUR_OBJECT_ID = 16L; + private static final long KEY_EIGHT_OBJECT_ID = 17L; + private static final long KEY_NINE_OBJECT_ID = 19L; + private static final long KEY_TEN_OBJECT_ID = 20L; + private static final long KEY_ELEVEN_OBJECT_ID = 21L; + + // container IDs + private static final long CONTAINER_ONE_ID = 1L; + private static final long CONTAINER_TWO_ID = 2L; + private static final long CONTAINER_THREE_ID = 3L; + private static final long CONTAINER_FOUR_ID = 4L; + private static final long CONTAINER_FIVE_ID = 5L; + private static final long CONTAINER_SIX_ID = 6L; + + // replication factors + private static final int CONTAINER_ONE_REPLICA_COUNT = 3; + private static final int CONTAINER_TWO_REPLICA_COUNT = 2; + private static final int CONTAINER_THREE_REPLICA_COUNT = 4; + private static final int CONTAINER_FOUR_REPLICA_COUNT = 5; + private static final int CONTAINER_FIVE_REPLICA_COUNT = 2; + private static final int CONTAINER_SIX_REPLICA_COUNT = 3; + + // block lengths + private static final long BLOCK_ONE_LENGTH = 1000L; + private static final long BLOCK_TWO_LENGTH = 2000L; + private static final long BLOCK_THREE_LENGTH = 3000L; + private static final long BLOCK_FOUR_LENGTH = 4000L; + private static final long BLOCK_FIVE_LENGTH = 5000L; + private static final long BLOCK_SIX_LENGTH = 6000L; + + // data size in bytes + private static final long KEY_ONE_SIZE = 500L; // bin 0 + private static final long KEY_TWO_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long KEY_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 + private static final long KEY_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_FIVE_SIZE = 100L; // bin 0 + private static final long KEY_SIX_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_SEVEN_SIZE = 4 * OzoneConsts.KB + 1; + private static final long KEY_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long KEY_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + + private static final long FILE1_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_ONE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE2_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_TWO_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE3_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_THREE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long 
FILE4_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_FOUR_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE5_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_FIVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE6_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_SIX_SIZE, + StandaloneReplicationConfig.getInstance(ONE));; + private static final long FILE7_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_SEVEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE));; + private static final long FILE8_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_EIGHT_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE9_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_NINE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE10_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_TEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE11_SIZE_WITH_REPLICA = + getReplicatedSize(KEY_ELEVEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA + = FILE7_SIZE_WITH_REPLICA; + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA + + FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA + + FILE11_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1 + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY + = FILE4_SIZE_WITH_REPLICA; + + // quota in bytes + private static final long ROOT_QUOTA = 2 * (2 * OzoneConsts.MB); + private static final long VOL_QUOTA = 2 * OzoneConsts.MB; + private static final long VOL_TWO_QUOTA = 2 * OzoneConsts.MB; + private static final long BUCKET_ONE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; + private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + private static final String ROOT_PATH = "/"; + private static final String VOL_PATH = "/vol"; + private static final String VOL_TWO_PATH = "/vol2"; + private static final String BUCKET_ONE_PATH = "/vol/bucket1"; + private static final String BUCKET_TWO_PATH = "/vol/bucket2"; + private static final String KEY_PATH = "/vol/bucket2/file4"; + private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/file7"; + private static final String INVALID_PATH = "/vol/path/not/found"; + + // some expected answers + private static final long ROOT_DATA_SIZE = + KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE + + KEY_FIVE_SIZE + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + + KEY_ELEVEN_SIZE; + private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + + KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE; + + private static final long VOL_TWO_DATA_SIZE = + KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; + + private static final long 
BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + + KEY_THREE_SIZE; + + private static final long BUCKET_TWO_DATA_SIZE = + KEY_FOUR_SIZE + KEY_FIVE_SIZE; + + + @BeforeEach + public void setUp() throws Exception { + conf = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve( + "JunitOmDBDir")).toFile(), conf); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProvider(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve( + "omMetadatDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .addBinding(OzoneStorageContainerManager.class, + getMockReconSCM()) + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(NSSummaryEndpoint.class) + .build(); + ReconNamespaceSummaryManager reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); + + // populate OM DB and reprocess into Recon RocksDB + populateOMDB(); + NSSummaryTaskWithOBS nsSummaryTaskWithOBS = + new NSSummaryTaskWithOBS(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + commonUtils = new CommonUtils(); + } + + @Test + public void testUtility() { + String[] names = EntityHandler.parseRequestPath(TEST_PATH_UTILITY); + assertArrayEquals(TEST_NAMES, names); + String keyName = BucketHandler.getKeyName(names); + assertEquals(TEST_KEY_NAMES, keyName); + String subpath = BucketHandler.buildSubpath(PARENT_DIR, "file1.txt"); + assertEquals(TEST_PATH_UTILITY, subpath); + } + + @Test + public void testGetBasicInfoRoot() throws Exception { + // Test root basics + Response rootResponse = nsSummaryEndpoint.getBasicInfo(ROOT_PATH); + NamespaceSummaryResponse rootResponseObj = + (NamespaceSummaryResponse) rootResponse.getEntity(); + assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); + assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); + assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(9, rootResponseObj.getCountStats().getNumTotalKey()); + } + + @Test + public void testGetBasicInfoVol() throws Exception { + // Test volume basics + Response volResponse = nsSummaryEndpoint.getBasicInfo(VOL_PATH); + NamespaceSummaryResponse volResponseObj = + (NamespaceSummaryResponse) volResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volResponseObj.getEntityType()); + assertEquals(2, volResponseObj.getCountStats().getNumBucket()); + assertEquals(5, volResponseObj.getCountStats().getNumTotalKey()); + assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj. 
+ getObjectDBInfo()).getOwner()); + assertEquals("vol", volResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, volResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + + @Test + public void testGetBasicInfoBucketOne() throws Exception { + // Test bucket 1's basics + Response bucketOneResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_ONE_PATH); + NamespaceSummaryResponse bucketOneObj = + (NamespaceSummaryResponse) bucketOneResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType()); + assertEquals(3, bucketOneObj.getCountStats().getNumTotalKey()); + assertEquals("vol", + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getStorageType()); + assertEquals(getBucketLayout(), + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getBucketLayout()); + assertEquals("bucket1", + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketTwo() throws Exception { + // Test bucket 2's basics + commonUtils.testNSSummaryBasicInfoBucketTwo( + BucketLayout.OBJECT_STORE, + nsSummaryEndpoint); + } + + @Test + public void testGetBasicInfoNoPath() throws Exception { + // Test invalid path + commonUtils.testNSSummaryBasicInfoNoPath(nsSummaryEndpoint); + } + + @Test + public void testGetBasicInfoKey() throws Exception { + // Test key + commonUtils.testNSSummaryBasicInfoKey(nsSummaryEndpoint); + } + + @Test + public void testDiskUsageRoot() throws Exception { + // root level DU + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, false); + DUResponse duRootRes = (DUResponse) rootResponse.getEntity(); + assertEquals(2, duRootRes.getCount()); + List duRootData = duRootRes.getDuData(); + // sort based on subpath + Collections.sort(duRootData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duVol1 = duRootData.get(0); + DUResponse.DiskUsage duVol2 = duRootData.get(1); + assertEquals(VOL_PATH, duVol1.getSubpath()); + assertEquals(VOL_TWO_PATH, duVol2.getSubpath()); + assertEquals(VOL_DATA_SIZE, duVol1.getSize()); + assertEquals(VOL_TWO_DATA_SIZE, duVol2.getSize()); + } + + @Test + public void testDiskUsageVolume() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket1 = duData.get(0); + DUResponse.DiskUsage duBucket2 = duData.get(1); + assertEquals(BUCKET_ONE_PATH, duBucket1.getSubpath()); + assertEquals(BUCKET_TWO_PATH, duBucket2.getSubpath()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucket1.getSize()); + assertEquals(BUCKET_TWO_DATA_SIZE, duBucket2.getSize()); + } + + @Test + public void testDiskUsageBucket() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. 
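// Context: an OBS namespace is flat, so bucket-level DU reports no child
// sub-paths; the bucket's total key size is still returned via getSize().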
+ assertEquals(0, duBucketResponse.getCount()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageKey() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + false, false); + DUResponse keyObj = (DUResponse) keyResponse.getEntity(); + assertEquals(0, keyObj.getCount()); + assertEquals(KEY_FOUR_SIZE, keyObj.getSize()); + } + + @Test + public void testDiskUsageUnknown() throws Exception { + // invalid path check + Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH, + false, false); + DUResponse invalidObj = (DUResponse) invalidResponse.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidObj.getStatus()); + } + + @Test + public void testDiskUsageWithReplication() throws Exception { + setUpMultiBlockKey(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderRootWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + // withReplica is true + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + + } + + @Test + public void testDataSizeUnderVolWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderBucketWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderKeyWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testQuotaUsage() throws Exception { + // root level quota usage + Response rootResponse = nsSummaryEndpoint.getQuotaUsage(ROOT_PATH); + QuotaUsageResponse quRootRes = + (QuotaUsageResponse) rootResponse.getEntity(); + assertEquals(ROOT_QUOTA, 
quRootRes.getQuota()); + assertEquals(ROOT_DATA_SIZE, quRootRes.getQuotaUsed()); + + // volume level quota usage + Response volResponse = nsSummaryEndpoint.getQuotaUsage(VOL_PATH); + QuotaUsageResponse quVolRes = (QuotaUsageResponse) volResponse.getEntity(); + assertEquals(VOL_QUOTA, quVolRes.getQuota()); + assertEquals(VOL_DATA_SIZE, quVolRes.getQuotaUsed()); + + // bucket level quota usage + Response bucketRes = nsSummaryEndpoint.getQuotaUsage(BUCKET_ONE_PATH); + QuotaUsageResponse quBucketRes = (QuotaUsageResponse) bucketRes.getEntity(); + assertEquals(BUCKET_ONE_QUOTA, quBucketRes.getQuota()); + assertEquals(BUCKET_ONE_DATA_SIZE, quBucketRes.getQuotaUsed()); + + Response bucketRes2 = nsSummaryEndpoint.getQuotaUsage(BUCKET_TWO_PATH); + QuotaUsageResponse quBucketRes2 = + (QuotaUsageResponse) bucketRes2.getEntity(); + assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota()); + assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed()); + + // other level not applicable + Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY_PATH); + QuotaUsageResponse quotaUsageResponse2 = + (QuotaUsageResponse) naResponse2.getEntity(); + assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE, + quotaUsageResponse2.getResponseCode()); + + // invalid path request + Response invalidRes = nsSummaryEndpoint.getQuotaUsage(INVALID_PATH); + QuotaUsageResponse invalidResObj = + (QuotaUsageResponse) invalidRes.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidResObj.getResponseCode()); + } + + + @Test + public void testFileSizeDist() throws Exception { + checkFileSizeDist(ROOT_PATH, 2, 3, 3, 1); + checkFileSizeDist(VOL_PATH, 2, 1, 1, 1); + checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1); + } + + public void checkFileSizeDist(String path, int bin0, + int bin1, int bin2, int bin3) throws Exception { + Response res = nsSummaryEndpoint.getFileSizeDistribution(path); + FileSizeDistributionResponse fileSizeDistResObj = + (FileSizeDistributionResponse) res.getEntity(); + int[] fileSizeDist = fileSizeDistResObj.getFileSizeDist(); + assertEquals(bin0, fileSizeDist[0]); + assertEquals(bin1, fileSizeDist[1]); + assertEquals(bin2, fileSizeDist[2]); + assertEquals(bin3, fileSizeDist[3]); + for (int i = 4; i < ReconConstants.NUM_OF_FILE_SIZE_BINS; ++i) { + assertEquals(0, fileSizeDist[i]); + } + } + + /** + * Testing the following case. + * ├── vol + * │ ├── bucket1 + * │ │ ├── file1 + * │ │ └── file2 + * │ │ └── file3 + * │ └── bucket2 + * │ ├── file4 + * │ └── file5 + * └── vol2 + * ├── bucket3 + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 + * └── file11 + * + * Write these keys to OM and + * replicate them. 
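+   * All buckets use the OBJECT_STORE layout, so every key sits directly
+   * under its bucket and no directory objects are created.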
+ */ + @SuppressWarnings("checkstyle:MethodLength") + private void populateOMDB() throws Exception { + + // write all keys + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + FILE_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + FILE_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + FILE_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + FILE_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + FILE_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + FILE_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_EIGHT_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + FILE_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_NINE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + FILE_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_TEN_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + FILE_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + KEY_ELEVEN_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * @throws IOException ioEx + */ + private static OMMetadataManager initializeNewOmMetadataManager( + File omDbDir, OzoneConfiguration omConfiguration) + throws IOException { + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_QUOTA) + .build(); + + String volume2Key = omMetadataManager.getVolumeKey(VOL_TWO); + OmVolumeArgs args2 = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_TWO_OBJECT_ID) + .setVolume(VOL_TWO) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_TWO_QUOTA) + .build(); + + omMetadataManager.getVolumeTable().put(volumeKey, args); + omMetadataManager.getVolumeTable().put(volume2Key, args2); + + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setQuotaInBytes(BUCKET_ONE_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setQuotaInBytes(BUCKET_TWO_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(BUCKET_THREE_OBJECT_ID) + .setQuotaInBytes(BUCKET_THREE_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FOUR) + .setObjectID(BUCKET_FOUR_OBJECT_ID) + .setQuotaInBytes(BUCKET_FOUR_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo.getVolumeName(), bucketInfo.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + String bucketKey3 = omMetadataManager.getBucketKey( + bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); + String bucketKey4 = omMetadataManager.getBucketKey( + bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); + omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + + return omMetadataManager; + } + + private void setUpMultiBlockKey() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup = + getLocationInfoGroup1(); + + // add the multi-block key to Recon's OM + writeKeyToOm(reconOMMetadataManager, + MULTI_BLOCK_KEY, + BUCKET_ONE, + VOL, + MULTI_BLOCK_FILE, + MULTI_BLOCK_KEY_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup), + getBucketLayout(), + KEY_SEVEN_SIZE); + } + + private OmKeyLocationInfoGroup getLocationInfoGroup1() { + List locationInfoList = new ArrayList<>(); + BlockID block1 = new BlockID(CONTAINER_ONE_ID, 0L); + BlockID block2 = new BlockID(CONTAINER_TWO_ID, 0L); + BlockID block3 = new BlockID(CONTAINER_THREE_ID, 0L); + + OmKeyLocationInfo location1 = new 
OmKeyLocationInfo.Builder() + .setBlockID(block1) + .setLength(BLOCK_ONE_LENGTH) + .build(); + OmKeyLocationInfo location2 = new OmKeyLocationInfo.Builder() + .setBlockID(block2) + .setLength(BLOCK_TWO_LENGTH) + .build(); + OmKeyLocationInfo location3 = new OmKeyLocationInfo.Builder() + .setBlockID(block3) + .setLength(BLOCK_THREE_LENGTH) + .build(); + locationInfoList.add(location1); + locationInfoList.add(location2); + locationInfoList.add(location3); + + return new OmKeyLocationInfoGroup(0L, locationInfoList); + } + + + private OmKeyLocationInfoGroup getLocationInfoGroup2() { + List locationInfoList = new ArrayList<>(); + BlockID block4 = new BlockID(CONTAINER_FOUR_ID, 0L); + BlockID block5 = new BlockID(CONTAINER_FIVE_ID, 0L); + BlockID block6 = new BlockID(CONTAINER_SIX_ID, 0L); + + OmKeyLocationInfo location4 = new OmKeyLocationInfo.Builder() + .setBlockID(block4) + .setLength(BLOCK_FOUR_LENGTH) + .build(); + OmKeyLocationInfo location5 = new OmKeyLocationInfo.Builder() + .setBlockID(block5) + .setLength(BLOCK_FIVE_LENGTH) + .build(); + OmKeyLocationInfo location6 = new OmKeyLocationInfo.Builder() + .setBlockID(block6) + .setLength(BLOCK_SIX_LENGTH) + .build(); + locationInfoList.add(location4); + locationInfoList.add(location5); + locationInfoList.add(location6); + return new OmKeyLocationInfoGroup(0L, locationInfoList); + + } + + @SuppressWarnings("checkstyle:MethodLength") + private void setUpMultiBlockReplicatedKeys() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup1 = + getLocationInfoGroup1(); + OmKeyLocationInfoGroup locationInfoGroup2 = + getLocationInfoGroup2(); + + //vol/bucket1/file1 + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + FILE_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + KEY_ONE_SIZE); + + //vol/bucket1/file2 + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + FILE_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + KEY_TWO_SIZE); + + //vol/bucket1/file3 + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + FILE_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + KEY_THREE_SIZE); + + //vol/bucket2/file4 + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + FILE_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + KEY_FOUR_SIZE); + + //vol/bucket2/file5 + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + FILE_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + KEY_FIVE_SIZE); + + //vol2/bucket3/file8 + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + FILE_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + KEY_EIGHT_SIZE); + + //vol2/bucket3/file9 + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + FILE_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + 
Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + KEY_NINE_SIZE); + + //vol2/bucket3/file10 + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + FILE_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + KEY_TEN_SIZE); + + //vol2/bucket4/file11 + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + FILE_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + KEY_ELEVEN_SIZE); + } + + /** + * Generate a set of mock container replica with a size of + * replication factor for container. + * @param replicationFactor number of replica + * @param containerID the container replicated based upon + * @return a set of container replica for testing + */ + private static Set generateMockContainerReplicas( + int replicationFactor, ContainerID containerID) { + Set result = new HashSet<>(); + for (int i = 0; i < replicationFactor; ++i) { + DatanodeDetails randomDatanode = randomDatanodeDetails(); + ContainerReplica replica = new ContainerReplica.ContainerReplicaBuilder() + .setContainerID(containerID) + .setContainerState( + StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN) + .setDatanodeDetails(randomDatanode) + .build(); + result.add(replica); + } + return result; + } + + private static ReconStorageContainerManagerFacade getMockReconSCM() + throws ContainerNotFoundException { + ReconStorageContainerManagerFacade reconSCM = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManager = mock(ContainerManager.class); + + // Container 1 is 3-way replicated + ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID); + Set containerReplicas1 = generateMockContainerReplicas( + CONTAINER_ONE_REPLICA_COUNT, containerID1); + when(containerManager.getContainerReplicas(containerID1)) + .thenReturn(containerReplicas1); + + // Container 2 is under replicated with 2 replica + ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID); + Set containerReplicas2 = generateMockContainerReplicas( + CONTAINER_TWO_REPLICA_COUNT, containerID2); + when(containerManager.getContainerReplicas(containerID2)) + .thenReturn(containerReplicas2); + + // Container 3 is over replicated with 4 replica + ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID); + Set containerReplicas3 = generateMockContainerReplicas( + CONTAINER_THREE_REPLICA_COUNT, containerID3); + when(containerManager.getContainerReplicas(containerID3)) + .thenReturn(containerReplicas3); + + // Container 4 is replicated with 5 replica + ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID); + Set containerReplicas4 = generateMockContainerReplicas( + CONTAINER_FOUR_REPLICA_COUNT, containerID4); + when(containerManager.getContainerReplicas(containerID4)) + .thenReturn(containerReplicas4); + + // Container 5 is replicated with 2 replica + ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID); + Set containerReplicas5 = generateMockContainerReplicas( + CONTAINER_FIVE_REPLICA_COUNT, containerID5); + when(containerManager.getContainerReplicas(containerID5)) + .thenReturn(containerReplicas5); + + // Container 6 is replicated with 3 replica + ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID); + Set containerReplicas6 = generateMockContainerReplicas( + CONTAINER_SIX_REPLICA_COUNT, 
containerID6); + when(containerManager.getContainerReplicas(containerID6)) + .thenReturn(containerReplicas6); + + when(reconSCM.getContainerManager()).thenReturn(containerManager); + ReconNodeManager mockReconNodeManager = mock(ReconNodeManager.class); + when(mockReconNodeManager.getStats()).thenReturn(getMockSCMRootStat()); + when(reconSCM.getScmNodeManager()).thenReturn(mockReconNodeManager); + return reconSCM; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + + private static SCMNodeStat getMockSCMRootStat() { + return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, + ROOT_QUOTA - ROOT_DATA_SIZE); + } + +} From 57801b247bc46f2fe23d86031d4135f1c0fcc775 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 11 Feb 2024 20:45:25 +0530 Subject: [PATCH 24/31] Fixed checkstyle --- .../ozone/recon/api/TestNSSummaryEndpointWithOBS.java | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java index fef23ef7e871..2b721b1d56c2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -28,7 +28,14 @@ import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; -import org.apache.hadoop.ozone.recon.api.types.*; +import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; +import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; import org.apache.hadoop.ozone.recon.common.CommonUtils; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; @@ -58,9 +65,7 @@ import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.mockito.Mockito.mock; From 3e8ec2ac39999276ed37b8578a90e925db8ddd55 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 11 Feb 2024 20:51:31 +0530 Subject: [PATCH 25/31] Added license --- .../api/TestNSSummaryEndpointWithOBS.java | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java index 2b721b1d56c2..ae4fcd4ce648 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.ozone.recon.api; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; From 76158c9cdb6d718c90b512b60766395263cf8c93 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 15 Feb 2024 12:57:53 +0530 Subject: [PATCH 26/31] Made changes for code review --- .../recon/tasks/NSSummaryTaskWithOBS.java | 6 +- .../api/TestNSSummaryEndpointWithOBS.java | 2 +- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 82 +++++++++---------- 3 files changed, 42 insertions(+), 48 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java index 4585ae05f634..af2dee802e39 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -85,7 +85,7 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { OmBucketInfo omBucketInfo = omMetadataManager .getBucketTable().getSkipCache(bucketDBKey); - if (omBucketInfo.getBucketLayout() != BucketLayout.OBJECT_STORE) { + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { continue; } @@ -152,7 +152,7 @@ public boolean processWithOBS(OMUpdateEventBatch events) { OmBucketInfo omBucketInfo = getReconOMMetadataManager().getBucketTable() .getSkipCache(bucketDBKey); - if (omBucketInfo.getBucketLayout() != BucketLayout.OBJECT_STORE) { + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { continue; } @@ -224,7 +224,7 @@ private void setKeyParentID(OmKeyInfo keyInfo) keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); } else { throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); + "NSSummaryTaskWithOBS is null"); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java index ae4fcd4ce648..22879653c3c9 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -1127,7 +1127,7 @@ 
private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE); + ROOT_QUOTA - ROOT_DATA_SIZE,0L,0L); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index ae69b055febd..5b33a8a573ad 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Nested; @@ -53,6 +52,10 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + /** * Unit test for NSSummaryTaskWithOBS. */ @@ -123,7 +126,7 @@ public static void setUp(@TempDir File tmpDir) throws Exception { NSSummary nonExistentSummary = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); - Assertions.assertNull(nonExistentSummary); + assertNull(nonExistentSummary); populateOMDB(); @@ -152,35 +155,35 @@ public void setUp() throws IOException { reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); // Verify commit - Assertions.assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); // reinit Recon RocksDB's namespace CF. 
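// The dummy NSSummary committed above under the unused object ID -1L acts
// as a sentinel: it must disappear once the table is cleared and rebuilt
// by reprocessWithOBS(), confirming reprocess starts from an empty table.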
reconNamespaceSummaryManager.clearNSSummaryTable(); nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); - Assertions.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); nsSummaryForBucket1 = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); nsSummaryForBucket2 = reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); - Assertions.assertNotNull(nsSummaryForBucket1); - Assertions.assertNotNull(nsSummaryForBucket2); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); } @Test public void testReprocessNSSummaryNull() throws IOException { - Assertions.assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); } @Test public void testReprocessGetFiles() { - Assertions.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); - Assertions.assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); - Assertions.assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, nsSummaryForBucket1.getSizeOfFiles()); - Assertions.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, nsSummaryForBucket2.getSizeOfFiles()); } @@ -188,18 +191,18 @@ public void testReprocessGetFiles() { public void testReprocessFileBucketSize() { int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); - Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket1.length); - Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket2.length); // Check for 1's and 0's in fileDistBucket1 int[] expectedIndexes1 = {0, 1, 40}; for (int index = 0; index < fileDistBucket1.length; index++) { if (contains(expectedIndexes1, index)) { - Assertions.assertEquals(1, fileDistBucket1[index]); + assertEquals(1, fileDistBucket1[index]); } else { - Assertions.assertEquals(0, fileDistBucket1[index]); + assertEquals(0, fileDistBucket1[index]); } } @@ -207,22 +210,13 @@ public void testReprocessFileBucketSize() { int[] expectedIndexes2 = {0, 2}; for (int index = 0; index < fileDistBucket2.length; index++) { if (contains(expectedIndexes2, index)) { - Assertions.assertEquals(1, fileDistBucket2[index]); + assertEquals(1, fileDistBucket2[index]); } else { - Assertions.assertEquals(0, fileDistBucket2[index]); + assertEquals(0, fileDistBucket2[index]); } } } - @Test - public void testReprocessBucketDirs() { - // None of the buckets have any child dirs because OBS is flat namespace. 
- Set childDirBucketOne = nsSummaryForBucket1.getChildDir(); - Set childDirBucketTwo = nsSummaryForBucket2.getChildDir(); - Assertions.assertEquals(0, childDirBucketOne.size()); - Assertions.assertEquals(0, childDirBucketTwo.size()); - } - } /** @@ -248,10 +242,10 @@ public void setUp() throws IOException { nsSummaryForBucket1 = reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); - Assertions.assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket1); nsSummaryForBucket2 = reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); - Assertions.assertNotNull(nsSummaryForBucket2); + assertNotNull(nsSummaryForBucket2); } private OMUpdateEventBatch processEventBatch() throws IOException { @@ -332,25 +326,25 @@ private OMUpdateEventBatch processEventBatch() throws IOException { @Test public void testProcessForCount() throws IOException { - Assertions.assertNotNull(nsSummaryForBucket1); - Assertions.assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); - Assertions.assertNotNull(nsSummaryForBucket2); - Assertions.assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); - Assertions.assertEquals(0, childDirBucket1.size()); + assertEquals(0, childDirBucket1.size()); Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); - Assertions.assertEquals(0, childDirBucket2.size()); + assertEquals(0, childDirBucket2.size()); } @Test public void testProcessForSize() throws IOException { - Assertions.assertNotNull(nsSummaryForBucket1); - Assertions.assertEquals( + assertNotNull(nsSummaryForBucket1); + assertEquals( KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, nsSummaryForBucket1.getSizeOfFiles()); - Assertions.assertNotNull(nsSummaryForBucket2); - Assertions.assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, nsSummaryForBucket2.getSizeOfFiles()); } @@ -359,18 +353,18 @@ public void testProcessForSize() throws IOException { public void testProcessFileBucketSize() { int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); - Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket1.length); - Assertions.assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, fileDistBucket2.length); // Check for 1's and 0's in fileDistBucket1 int[] expectedIndexes1 = {1, 3, 40}; for (int index = 0; index < fileDistBucket1.length; index++) { if (contains(expectedIndexes1, index)) { - Assertions.assertEquals(1, fileDistBucket1[index]); + assertEquals(1, fileDistBucket1[index]); } else { - Assertions.assertEquals(0, fileDistBucket1[index]); + assertEquals(0, fileDistBucket1[index]); } } @@ -378,9 +372,9 @@ public void testProcessFileBucketSize() { int[] expectedIndexes2 = {0, 2, 3}; for (int index = 0; index < fileDistBucket2.length; index++) { if (contains(expectedIndexes2, index)) { - Assertions.assertEquals(1, fileDistBucket2[index]); + assertEquals(1, fileDistBucket2[index]); } else { - Assertions.assertEquals(0, fileDistBucket2[index]); + assertEquals(0, fileDistBucket2[index]); } } } From cf04f83c229fc7bae1c6f44de8cc1b60a062d3a8 Mon Sep 17 
00:00:00 2001 From: arafat Date: Tue, 27 Feb 2024 14:00:46 +0530 Subject: [PATCH 27/31] Fixed review comments --- .../hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java index 22879653c3c9..ef8bed0e75ef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -129,8 +129,6 @@ public class TestNSSummaryEndpointWithOBS { private static final String KEY_THREE = "file3"; private static final String KEY_FOUR = "file4"; private static final String KEY_FIVE = "file5"; - private static final String KEY_SIX = "file6"; - private static final String KEY_SEVEN = "file7"; private static final String KEY_EIGHT = "file8"; private static final String KEY_NINE = "file9"; private static final String KEY_TEN = "file10"; @@ -143,8 +141,6 @@ public class TestNSSummaryEndpointWithOBS { private static final String FILE_THREE = "file3"; private static final String FILE_FOUR = "file4"; private static final String FILE_FIVE = "file5"; - private static final String FILE_SIX = "file6"; - private static final String FILE_SEVEN = "file7"; private static final String FILE_EIGHT = "file8"; private static final String FILE_NINE = "file9"; private static final String FILE_TEN = "file10"; From dca31d36996459897fd5d10060e33b1e02e6092e Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 27 Feb 2024 17:51:37 +0530 Subject: [PATCH 28/31] Fixed more review comments --- .../api/TestNSSummaryEndpointWithOBS.java | 32 +++++++++---------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java index ef8bed0e75ef..3a88385e5190 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -66,6 +66,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; + import javax.ws.rs.core.Response; import java.io.File; @@ -133,8 +134,8 @@ public class TestNSSummaryEndpointWithOBS { private static final String KEY_NINE = "file9"; private static final String KEY_TEN = "file10"; private static final String KEY_ELEVEN = "file11"; - private static final String MULTI_BLOCK_KEY = "file7"; - private static final String MULTI_BLOCK_FILE = "file7"; + private static final String MULTI_BLOCK_KEY = "file3"; + private static final String MULTI_BLOCK_FILE = "file3"; private static final String FILE_ONE = "file1"; private static final String FILE_TWO = "file2"; @@ -218,12 +219,7 @@ public class TestNSSummaryEndpointWithOBS { private static final long FILE5_SIZE_WITH_REPLICA = getReplicatedSize(KEY_FIVE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); - private static final long FILE6_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_SIX_SIZE, - StandaloneReplicationConfig.getInstance(ONE));; - private static final long FILE7_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_SEVEN_SIZE, - 
StandaloneReplicationConfig.getInstance(ONE));; + private static final long FILE8_SIZE_WITH_REPLICA = getReplicatedSize(KEY_EIGHT_SIZE, StandaloneReplicationConfig.getInstance(ONE)); @@ -238,7 +234,7 @@ public class TestNSSummaryEndpointWithOBS { StandaloneReplicationConfig.getInstance(ONE)); private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA - = FILE7_SIZE_WITH_REPLICA; + = FILE3_SIZE_WITH_REPLICA; private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT = FILE1_SIZE_WITH_REPLICA @@ -286,7 +282,7 @@ public class TestNSSummaryEndpointWithOBS { private static final String BUCKET_ONE_PATH = "/vol/bucket1"; private static final String BUCKET_TWO_PATH = "/vol/bucket2"; private static final String KEY_PATH = "/vol/bucket2/file4"; - private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/file7"; + private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/file3"; private static final String INVALID_PATH = "/vol/path/not/found"; // some expected answers @@ -376,11 +372,11 @@ public void testGetBasicInfoVol() throws Exception { volResponseObj.getEntityType()); assertEquals(2, volResponseObj.getCountStats().getNumBucket()); assertEquals(5, volResponseObj.getCountStats().getNumTotalKey()); - assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj. + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. getObjectDBInfo()).getAdmin()); - assertEquals("TestUser", ((VolumeObjectDBInfo) volResponseObj. + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. getObjectDBInfo()).getOwner()); - assertEquals("vol", volResponseObj.getObjectDBInfo().getName()); + assertEquals(VOL, volResponseObj.getObjectDBInfo().getName()); assertEquals(2097152, volResponseObj.getObjectDBInfo().getQuotaInBytes()); assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); } @@ -394,7 +390,7 @@ public void testGetBasicInfoBucketOne() throws Exception { (NamespaceSummaryResponse) bucketOneResponse.getEntity(); assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType()); assertEquals(3, bucketOneObj.getCountStats().getNumTotalKey()); - assertEquals("vol", + assertEquals(VOL, ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName()); assertEquals(StorageType.DISK, ((BucketObjectDBInfo) @@ -402,7 +398,7 @@ public void testGetBasicInfoBucketOne() throws Exception { assertEquals(getBucketLayout(), ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getBucketLayout()); - assertEquals("bucket1", + assertEquals(BUCKET_ONE, ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getName()); } @@ -751,6 +747,7 @@ private void populateOMDB() throws Exception { /** * Create a new OM Metadata manager instance with one user, one vol, and two * buckets. + * * @throws IOException ioEx */ private static OMMetadataManager initializeNewOmMetadataManager( @@ -1042,8 +1039,9 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { /** * Generate a set of mock container replica with a size of * replication factor for container. 
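+   * Replicas are mocked in the OPEN state on randomly generated datanodes.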
+ * * @param replicationFactor number of replica - * @param containerID the container replicated based upon + * @param containerID the container replicated based upon * @return a set of container replica for testing */ private static Set generateMockContainerReplicas( @@ -1123,7 +1121,7 @@ private static BucketLayout getBucketLayout() { private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, - ROOT_QUOTA - ROOT_DATA_SIZE,0L,0L); + ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); } } From 1d2d0b5a2a9047860d686215722fba160d204ef4 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 28 Feb 2024 23:14:07 +0530 Subject: [PATCH 29/31] Made changes to the TestNSSummaryTaskWithOBS --- .../api/TestNSSummaryEndpointWithOBS.java | 200 +++++++++--------- 1 file changed, 100 insertions(+), 100 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java index 3a88385e5190..b68425a2edd6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -93,12 +93,22 @@ /** * Test for NSSummary REST APIs with OBS. * Testing is done on a simple object store model with a flat hierarchy: - * vol - * / \ - * bucket1 bucket2 - * | | - * file1 file2 - * + * Testing the following case. + * ├── vol + * │ ├── bucket1 + * │ │ ├── file1 + * │ │ └── file2 + * │ │ └── file3 + * │ └── bucket2 + * │ ├── file4 + * │ └── file5 + * └── vol2 + * ├── bucket3 + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 + * └── file11 * This tests the Rest APIs for NSSummary in the context of OBS buckets, * focusing on disk usage, quota usage, and file size distribution. 
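+ * File size bins are assumed to double per index (bin 0 holds keys up to
+ * 1 KB, bin 1 up to 2 KB, bin 2 up to 4 KB, ...), matching the bin
+ * comments on the key size constants below. As a sketch, for a size s in
+ * bytes: bin(s) = (s <= 1024) ? 0 : 64 - Long.numberOfLeadingZeros((s - 1) / 1024),
+ * e.g. bin(500) = 0, bin(1025) = 1, bin(2049) = 2, bin(4097) = 3.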
*/ @@ -125,18 +135,6 @@ public class TestNSSummaryEndpointWithOBS { private static final String BUCKET_TWO = "bucket2"; private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; - private static final String KEY_ONE = "file1"; - private static final String KEY_TWO = "file2"; - private static final String KEY_THREE = "file3"; - private static final String KEY_FOUR = "file4"; - private static final String KEY_FIVE = "file5"; - private static final String KEY_EIGHT = "file8"; - private static final String KEY_NINE = "file9"; - private static final String KEY_TEN = "file10"; - private static final String KEY_ELEVEN = "file11"; - private static final String MULTI_BLOCK_KEY = "file3"; - private static final String MULTI_BLOCK_FILE = "file3"; - private static final String FILE_ONE = "file1"; private static final String FILE_TWO = "file2"; private static final String FILE_THREE = "file3"; @@ -146,26 +144,25 @@ public class TestNSSummaryEndpointWithOBS { private static final String FILE_NINE = "file9"; private static final String FILE_TEN = "file10"; private static final String FILE_ELEVEN = "file11"; - // objects IDs + private static final String MULTI_BLOCK_FILE = FILE_THREE; + private static final long PARENT_OBJECT_ID_ZERO = 0L; private static final long VOL_OBJECT_ID = 0L; + private static final long VOL_TWO_OBJECT_ID = 14L; private static final long BUCKET_ONE_OBJECT_ID = 1L; private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long BUCKET_THREE_OBJECT_ID = 15L; + private static final long BUCKET_FOUR_OBJECT_ID = 16L; private static final long KEY_ONE_OBJECT_ID = 3L; private static final long KEY_TWO_OBJECT_ID = 5L; - private static final long KEY_FOUR_OBJECT_ID = 6L; private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FOUR_OBJECT_ID = 6L; private static final long KEY_FIVE_OBJECT_ID = 9L; - private static final long KEY_SIX_OBJECT_ID = 10L; - private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; - private static final long KEY_SEVEN_OBJECT_ID = 13L; - private static final long VOL_TWO_OBJECT_ID = 14L; - private static final long BUCKET_THREE_OBJECT_ID = 15L; - private static final long BUCKET_FOUR_OBJECT_ID = 16L; private static final long KEY_EIGHT_OBJECT_ID = 17L; private static final long KEY_NINE_OBJECT_ID = 19L; private static final long KEY_TEN_OBJECT_ID = 20L; private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; // container IDs private static final long CONTAINER_ONE_ID = 1L; @@ -192,45 +189,43 @@ public class TestNSSummaryEndpointWithOBS { private static final long BLOCK_SIX_LENGTH = 6000L; // data size in bytes - private static final long KEY_ONE_SIZE = 500L; // bin 0 - private static final long KEY_TWO_SIZE = OzoneConsts.KB + 1; // bin 1 - private static final long KEY_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 - private static final long KEY_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_FIVE_SIZE = 100L; // bin 0 - private static final long KEY_SIX_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_SEVEN_SIZE = 4 * OzoneConsts.KB + 1; - private static final long KEY_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 - private static final long KEY_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 - private static final long KEY_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long 
FILE_ONE_SIZE = 500L; // bin 0 + private static final long FILE_TWO_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 + private static final long FILE_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_FIVE_SIZE = 100L; // bin 0 + private static final long FILE_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 private static final long FILE1_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_ONE_SIZE, + getReplicatedSize(FILE_ONE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE2_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_TWO_SIZE, + getReplicatedSize(FILE_TWO_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE3_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_THREE_SIZE, + getReplicatedSize(FILE_THREE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE4_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_FOUR_SIZE, + getReplicatedSize(FILE_FOUR_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE5_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_FIVE_SIZE, + getReplicatedSize(FILE_FIVE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE8_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_EIGHT_SIZE, + getReplicatedSize(FILE_EIGHT_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE9_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_NINE_SIZE, + getReplicatedSize(FILE_NINE_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE10_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_TEN_SIZE, + getReplicatedSize(FILE_TEN_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long FILE11_SIZE_WITH_REPLICA = - getReplicatedSize(KEY_ELEVEN_SIZE, + getReplicatedSize(FILE_ELEVEN_SIZE, StandaloneReplicationConfig.getInstance(ONE)); private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA @@ -277,30 +272,35 @@ public class TestNSSummaryEndpointWithOBS { // mock client's path requests private static final String TEST_USER = "TestUser"; private static final String ROOT_PATH = "/"; - private static final String VOL_PATH = "/vol"; - private static final String VOL_TWO_PATH = "/vol2"; - private static final String BUCKET_ONE_PATH = "/vol/bucket1"; - private static final String BUCKET_TWO_PATH = "/vol/bucket2"; - private static final String KEY_PATH = "/vol/bucket2/file4"; - private static final String MULTI_BLOCK_KEY_PATH = "/vol/bucket1/file3"; + private static final String VOL_PATH = ROOT_PATH + VOL; + private static final String VOL_TWO_PATH = ROOT_PATH + VOL_TWO; + private static final String BUCKET_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE; + private static final String BUCKET_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO; + private static final String KEY_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + FILE_FOUR; + private static final String MULTI_BLOCK_KEY_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + FILE_THREE; private static final String INVALID_PATH = "/vol/path/not/found"; // some expected answers private static final long ROOT_DATA_SIZE = - KEY_ONE_SIZE + KEY_TWO_SIZE + KEY_THREE_SIZE + KEY_FOUR_SIZE + - KEY_FIVE_SIZE + KEY_EIGHT_SIZE 
+ KEY_NINE_SIZE + KEY_TEN_SIZE + - KEY_ELEVEN_SIZE; - private static final long VOL_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE + KEY_FOUR_SIZE + KEY_FIVE_SIZE; + FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + + FILE_FIVE_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + + FILE_ELEVEN_SIZE; + private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE; private static final long VOL_TWO_DATA_SIZE = - KEY_EIGHT_SIZE + KEY_NINE_SIZE + KEY_TEN_SIZE + KEY_ELEVEN_SIZE; + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; - private static final long BUCKET_ONE_DATA_SIZE = KEY_ONE_SIZE + KEY_TWO_SIZE + - KEY_THREE_SIZE; + private static final long BUCKET_ONE_DATA_SIZE = FILE_ONE_SIZE + + FILE_TWO_SIZE + + FILE_THREE_SIZE; private static final long BUCKET_TWO_DATA_SIZE = - KEY_FOUR_SIZE + KEY_FIVE_SIZE; + FILE_FOUR_SIZE + FILE_FIVE_SIZE; @BeforeEach @@ -478,7 +478,7 @@ public void testDiskUsageKey() throws Exception { false, false); DUResponse keyObj = (DUResponse) keyResponse.getEntity(); assertEquals(0, keyObj.getCount()); - assertEquals(KEY_FOUR_SIZE, keyObj.getSize()); + assertEquals(FILE_FOUR_SIZE, keyObj.getSize()); } @Test @@ -643,7 +643,7 @@ private void populateOMDB() throws Exception { // write all keys writeKeyToOm(reconOMMetadataManager, - KEY_ONE, + FILE_ONE, BUCKET_ONE, VOL, FILE_ONE, @@ -651,10 +651,10 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, - KEY_ONE_SIZE, + FILE_ONE_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_TWO, + FILE_TWO, BUCKET_ONE, VOL, FILE_TWO, @@ -662,10 +662,10 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, - KEY_TWO_SIZE, + FILE_TWO_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_THREE, + FILE_THREE, BUCKET_ONE, VOL, FILE_THREE, @@ -673,10 +673,10 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, - KEY_THREE_SIZE, + FILE_THREE_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_FOUR, + FILE_FOUR, BUCKET_TWO, VOL, FILE_FOUR, @@ -684,10 +684,10 @@ private void populateOMDB() throws Exception { BUCKET_TWO_OBJECT_ID, BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, - KEY_FOUR_SIZE, + FILE_FOUR_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_FIVE, + FILE_FIVE, BUCKET_TWO, VOL, FILE_FIVE, @@ -695,11 +695,11 @@ private void populateOMDB() throws Exception { BUCKET_TWO_OBJECT_ID, BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, - KEY_FIVE_SIZE, + FILE_FIVE_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_EIGHT, + FILE_EIGHT, BUCKET_THREE, VOL_TWO, FILE_EIGHT, @@ -707,10 +707,10 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, - KEY_EIGHT_SIZE, + FILE_EIGHT_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_NINE, + FILE_NINE, BUCKET_THREE, VOL_TWO, FILE_NINE, @@ -718,10 +718,10 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, - KEY_NINE_SIZE, + FILE_NINE_SIZE, getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_TEN, + FILE_TEN, BUCKET_THREE, VOL_TWO, FILE_TEN, @@ -729,10 +729,10 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, - KEY_TEN_SIZE, + FILE_TEN_SIZE, 
getBucketLayout()); writeKeyToOm(reconOMMetadataManager, - KEY_ELEVEN, + FILE_ELEVEN, BUCKET_FOUR, VOL_TWO, FILE_ELEVEN, @@ -740,7 +740,7 @@ private void populateOMDB() throws Exception { PARENT_OBJECT_ID_ZERO, BUCKET_FOUR_OBJECT_ID, VOL_TWO_OBJECT_ID, - KEY_ELEVEN_SIZE, + FILE_ELEVEN_SIZE, getBucketLayout()); } @@ -838,7 +838,7 @@ private void setUpMultiBlockKey() throws IOException { // add the multi-block key to Recon's OM writeKeyToOm(reconOMMetadataManager, - MULTI_BLOCK_KEY, + MULTI_BLOCK_FILE, BUCKET_ONE, VOL, MULTI_BLOCK_FILE, @@ -848,7 +848,7 @@ private void setUpMultiBlockKey() throws IOException { VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup), getBucketLayout(), - KEY_SEVEN_SIZE); + FILE_THREE_SIZE); } private OmKeyLocationInfoGroup getLocationInfoGroup1() { @@ -911,7 +911,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { //vol/bucket1/file1 writeKeyToOm(reconOMMetadataManager, - KEY_ONE, + FILE_ONE, BUCKET_ONE, VOL, FILE_ONE, @@ -921,11 +921,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), getBucketLayout(), - KEY_ONE_SIZE); + FILE_ONE_SIZE); //vol/bucket1/file2 writeKeyToOm(reconOMMetadataManager, - KEY_TWO, + FILE_TWO, BUCKET_ONE, VOL, FILE_TWO, @@ -935,11 +935,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup2), getBucketLayout(), - KEY_TWO_SIZE); + FILE_TWO_SIZE); //vol/bucket1/file3 writeKeyToOm(reconOMMetadataManager, - KEY_THREE, + FILE_THREE, BUCKET_ONE, VOL, FILE_THREE, @@ -949,11 +949,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), getBucketLayout(), - KEY_THREE_SIZE); + FILE_THREE_SIZE); //vol/bucket2/file4 writeKeyToOm(reconOMMetadataManager, - KEY_FOUR, + FILE_FOUR, BUCKET_TWO, VOL, FILE_FOUR, @@ -963,11 +963,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup2), getBucketLayout(), - KEY_FOUR_SIZE); + FILE_FOUR_SIZE); //vol/bucket2/file5 writeKeyToOm(reconOMMetadataManager, - KEY_FIVE, + FILE_FIVE, BUCKET_TWO, VOL, FILE_FIVE, @@ -977,11 +977,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), getBucketLayout(), - KEY_FIVE_SIZE); + FILE_FIVE_SIZE); //vol2/bucket3/file8 writeKeyToOm(reconOMMetadataManager, - KEY_EIGHT, + FILE_EIGHT, BUCKET_THREE, VOL_TWO, FILE_EIGHT, @@ -991,11 +991,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup2), getBucketLayout(), - KEY_EIGHT_SIZE); + FILE_EIGHT_SIZE); //vol2/bucket3/file9 writeKeyToOm(reconOMMetadataManager, - KEY_NINE, + FILE_NINE, BUCKET_THREE, VOL_TWO, FILE_NINE, @@ -1005,11 +1005,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup1), getBucketLayout(), - KEY_NINE_SIZE); + FILE_NINE_SIZE); //vol2/bucket3/file10 writeKeyToOm(reconOMMetadataManager, - KEY_TEN, + FILE_TEN, BUCKET_THREE, VOL_TWO, FILE_TEN, @@ -1019,11 +1019,11 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup2), getBucketLayout(), - KEY_TEN_SIZE); + FILE_TEN_SIZE); //vol2/bucket4/file11 writeKeyToOm(reconOMMetadataManager, - KEY_ELEVEN, + FILE_ELEVEN, BUCKET_FOUR, VOL_TWO, 
         FILE_ELEVEN,
@@ -1033,7 +1033,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
         VOL_TWO_OBJECT_ID,
         Collections.singletonList(locationInfoGroup1),
         getBucketLayout(),
-        KEY_ELEVEN_SIZE);
+        FILE_ELEVEN_SIZE);
   }
 
   /**

From e1120aedc3773c68d3ad8602ba533100483f71d4 Mon Sep 17 00:00:00 2001
From: arafat
Date: Thu, 29 Feb 2024 17:23:38 +0530
Subject: [PATCH 30/31] Change the names of variable constants

---
 .../api/TestNSSummaryEndpointWithOBS.java     | 96 +++++++++----------
 .../recon/tasks/TestNSSummaryTaskWithOBS.java |  2 +-
 2 files changed, 49 insertions(+), 49 deletions(-)

diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java
index b68425a2edd6..ac8dee5f0937 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java
@@ -135,16 +135,16 @@ public class TestNSSummaryEndpointWithOBS {
   private static final String BUCKET_TWO = "bucket2";
   private static final String BUCKET_THREE = "bucket3";
   private static final String BUCKET_FOUR = "bucket4";
-  private static final String FILE_ONE = "file1";
-  private static final String FILE_TWO = "file2";
-  private static final String FILE_THREE = "file3";
-  private static final String FILE_FOUR = "file4";
-  private static final String FILE_FIVE = "file5";
-  private static final String FILE_EIGHT = "file8";
-  private static final String FILE_NINE = "file9";
-  private static final String FILE_TEN = "file10";
-  private static final String FILE_ELEVEN = "file11";
-  private static final String MULTI_BLOCK_FILE = FILE_THREE;
+  private static final String KEY_ONE = "file1";
+  private static final String KEY_TWO = "file2";
+  private static final String KEY_THREE = "file3";
+  private static final String KEY_FOUR = "file4";
+  private static final String KEY_FIVE = "file5";
+  private static final String KEY_EIGHT = "file8";
+  private static final String KEY_NINE = "file9";
+  private static final String KEY_TEN = "file10";
+  private static final String KEY_ELEVEN = "file11";
+  private static final String MULTI_BLOCK_FILE = KEY_THREE;
 
   private static final long PARENT_OBJECT_ID_ZERO = 0L;
   private static final long VOL_OBJECT_ID = 0L;
@@ -279,9 +279,9 @@ public class TestNSSummaryEndpointWithOBS {
   private static final String BUCKET_TWO_PATH =
       ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO;
   private static final String KEY_PATH =
-      ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + FILE_FOUR;
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR;
   private static final String MULTI_BLOCK_KEY_PATH =
-      ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + FILE_THREE;
+      ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE;
   private static final String INVALID_PATH = "/vol/path/not/found";
 
   // some expected answers
@@ -643,10 +643,10 @@ private void populateOMDB() throws Exception {
 
     // write all keys
     writeKeyToOm(reconOMMetadataManager,
-        FILE_ONE,
+        KEY_ONE,
         BUCKET_ONE,
         VOL,
-        FILE_ONE,
+        KEY_ONE,
         KEY_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
@@ -654,10 +654,10 @@ private void populateOMDB() throws Exception {
         FILE_ONE_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_TWO,
+        KEY_TWO,
         BUCKET_ONE,
         VOL,
-        FILE_TWO,
+        KEY_TWO,
         KEY_TWO_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
@@ -665,10 +665,10 @@ private void populateOMDB() throws Exception {
         FILE_TWO_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_THREE,
+        KEY_THREE,
         BUCKET_ONE,
         VOL,
-        FILE_THREE,
+        KEY_THREE,
         KEY_THREE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
@@ -676,10 +676,10 @@ private void populateOMDB() throws Exception {
         FILE_THREE_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_FOUR,
+        KEY_FOUR,
         BUCKET_TWO,
         VOL,
-        FILE_FOUR,
+        KEY_FOUR,
         KEY_FOUR_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
@@ -687,10 +687,10 @@ private void populateOMDB() throws Exception {
         FILE_FOUR_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_FIVE,
+        KEY_FIVE,
         BUCKET_TWO,
         VOL,
-        FILE_FIVE,
+        KEY_FIVE,
         KEY_FIVE_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
@@ -699,10 +699,10 @@ private void populateOMDB() throws Exception {
         getBucketLayout());
 
     writeKeyToOm(reconOMMetadataManager,
-        FILE_EIGHT,
+        KEY_EIGHT,
         BUCKET_THREE,
         VOL_TWO,
-        FILE_EIGHT,
+        KEY_EIGHT,
         KEY_EIGHT_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
@@ -710,10 +710,10 @@ private void populateOMDB() throws Exception {
         FILE_EIGHT_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_NINE,
+        KEY_NINE,
         BUCKET_THREE,
         VOL_TWO,
-        FILE_NINE,
+        KEY_NINE,
         KEY_NINE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
@@ -721,10 +721,10 @@ private void populateOMDB() throws Exception {
         FILE_NINE_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_TEN,
+        KEY_TEN,
         BUCKET_THREE,
         VOL_TWO,
-        FILE_TEN,
+        KEY_TEN,
         KEY_TEN_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
@@ -732,10 +732,10 @@ private void populateOMDB() throws Exception {
         FILE_TEN_SIZE,
         getBucketLayout());
     writeKeyToOm(reconOMMetadataManager,
-        FILE_ELEVEN,
+        KEY_ELEVEN,
         BUCKET_FOUR,
         VOL_TWO,
-        FILE_ELEVEN,
+        KEY_ELEVEN,
         KEY_ELEVEN_OBJECT_ID,
         PARENT_OBJECT_ID_ZERO,
         BUCKET_FOUR_OBJECT_ID,
@@ -911,10 +911,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol/bucket1/file1
     writeKeyToOm(reconOMMetadataManager,
-        FILE_ONE,
+        KEY_ONE,
         BUCKET_ONE,
         VOL,
-        FILE_ONE,
+        KEY_ONE,
         KEY_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
@@ -925,10 +925,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol/bucket1/file2
     writeKeyToOm(reconOMMetadataManager,
-        FILE_TWO,
+        KEY_TWO,
         BUCKET_ONE,
         VOL,
-        FILE_TWO,
+        KEY_TWO,
         KEY_TWO_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
@@ -939,10 +939,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol/bucket1/file3
     writeKeyToOm(reconOMMetadataManager,
-        FILE_THREE,
+        KEY_THREE,
         BUCKET_ONE,
         VOL,
-        FILE_THREE,
+        KEY_THREE,
         KEY_THREE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
         BUCKET_ONE_OBJECT_ID,
@@ -953,10 +953,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol/bucket2/file4
     writeKeyToOm(reconOMMetadataManager,
-        FILE_FOUR,
+        KEY_FOUR,
         BUCKET_TWO,
         VOL,
-        FILE_FOUR,
+        KEY_FOUR,
         KEY_FOUR_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
@@ -967,10 +967,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol/bucket2/file5
     writeKeyToOm(reconOMMetadataManager,
-        FILE_FIVE,
+        KEY_FIVE,
         BUCKET_TWO,
         VOL,
-        FILE_FIVE,
+        KEY_FIVE,
         KEY_FIVE_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
         BUCKET_TWO_OBJECT_ID,
@@ -981,10 +981,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol2/bucket3/file8
     writeKeyToOm(reconOMMetadataManager,
-        FILE_EIGHT,
+        KEY_EIGHT,
         BUCKET_THREE,
         VOL_TWO,
-        FILE_EIGHT,
+        KEY_EIGHT,
         KEY_EIGHT_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
@@ -995,10 +995,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol2/bucket3/file9
     writeKeyToOm(reconOMMetadataManager,
-        FILE_NINE,
+        KEY_NINE,
         BUCKET_THREE,
         VOL_TWO,
-        FILE_NINE,
+        KEY_NINE,
         KEY_NINE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
@@ -1009,10 +1009,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol2/bucket3/file10
     writeKeyToOm(reconOMMetadataManager,
-        FILE_TEN,
+        KEY_TEN,
         BUCKET_THREE,
         VOL_TWO,
-        FILE_TEN,
+        KEY_TEN,
         KEY_TEN_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
         BUCKET_THREE_OBJECT_ID,
@@ -1023,10 +1023,10 @@ private void setUpMultiBlockReplicatedKeys() throws IOException {
 
     //vol2/bucket4/file11
     writeKeyToOm(reconOMMetadataManager,
-        FILE_ELEVEN,
+        KEY_ELEVEN,
         BUCKET_FOUR,
         VOL_TWO,
-        FILE_ELEVEN,
+        KEY_ELEVEN,
         KEY_ELEVEN_OBJECT_ID,
         BUCKET_FOUR_OBJECT_ID,
         BUCKET_FOUR_OBJECT_ID,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
index 5b33a8a573ad..8f9d6b2990a5 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java
@@ -220,7 +220,7 @@ public void testReprocessFileBucketSize() {
   }
 
   /**
-   * Nested class for testing NSSummaryTaskWithLegacy process.
+   * Nested class for testing NSSummaryTaskWithOBS process.
    */
   @Nested
   public class TestProcess {

From 0555a7f7a9ddf5d58c2db427349560273037f694 Mon Sep 17 00:00:00 2001
From: arafat
Date: Thu, 29 Feb 2024 17:52:35 +0530
Subject: [PATCH 31/31] Fixed potential NPE being thrown by value

---
 .../hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
index af2dee802e39..34c7dc967c3a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
@@ -133,11 +133,16 @@ public boolean processWithOBS(OMUpdateEventBatch events) {
       OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent;
       Object value = keyTableUpdateEvent.getValue();
       Object oldValue = keyTableUpdateEvent.getOldValue();
-      if (!(value instanceof OmKeyInfo)) {
+      if (value == null) {
+        LOG.warn("Value is null for key {}. Skipping processing.",
+            updatedKey);
+        continue;
+      } else if (!(value instanceof OmKeyInfo)) {
         LOG.warn("Unexpected value type {} for key {}. Skipping processing.",
             value.getClass().getName(), updatedKey);
         continue;
       }
+
      OmKeyInfo updatedKeyInfo = (OmKeyInfo) value;
      OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue;
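
Reviewer note, not part of the patches above: the guard added in PATCH 31/31 is easier to evaluate outside diff context, so below is a minimal, self-contained Java sketch of the same null-then-instanceof ordering. UpdateEvent, KeyInfo, and processEvents are hypothetical stand-ins for Recon's OMDBUpdateEvent/OmKeyInfo plumbing, not code from this series. The ordering matters: instanceof is itself null-safe, but the pre-patch warn branch called value.getClass().getName(), and that dereference is what could throw the NPE when the event carried a null value, so null has to be rejected first.

import java.util.Arrays;
import java.util.List;

public final class ValueGuardSketch {

  // Hypothetical stand-in for an OM DB update event carrying an untyped value.
  static final class UpdateEvent {
    private final String key;
    private final Object value;

    UpdateEvent(String key, Object value) {
      this.key = key;
      this.value = value;
    }

    String getKey() {
      return key;
    }

    Object getValue() {
      return value;
    }
  }

  // Hypothetical stand-in for the key metadata type (OmKeyInfo in Recon).
  static final class KeyInfo {
    private final long dataSize;

    KeyInfo(long dataSize) {
      this.dataSize = dataSize;
    }

    long getDataSize() {
      return dataSize;
    }
  }

  static void processEvents(List<UpdateEvent> events) {
    for (UpdateEvent event : events) {
      Object value = event.getValue();
      if (value == null) {
        // Reject null before anything dereferences value.
        System.out.printf("Value is null for key %s. Skipping.%n",
            event.getKey());
        continue;
      } else if (!(value instanceof KeyInfo)) {
        // Safe here: value is known to be non-null.
        System.out.printf("Unexpected value type %s for key %s. Skipping.%n",
            value.getClass().getName(), event.getKey());
        continue;
      }
      KeyInfo keyInfo = (KeyInfo) value;
      System.out.printf("Key %s has size %d.%n",
          event.getKey(), keyInfo.getDataSize());
    }
  }

  public static void main(String[] args) {
    processEvents(Arrays.asList(
        new UpdateEvent("file1", new KeyInfo(500L)),
        new UpdateEvent("file2", null),          // previously hit the NPE path
        new UpdateEvent("file3", "not-a-key"))); // wrong type, skipped
  }
}

With the guard in place, a null value logs a warning and the event is skipped instead of aborting the whole batch, which matches the intent stated in the commit subject.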