/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ozone.recon.api;

import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.inject.Inject;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler;
import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath;

/**
 * REST endpoint implementing prefix search over the open key tables in the
 * Recon OM DB, backing the "Open Keys" Insights section.
 *
 * <p>Both bucket layouts are searched: LEGACY/OBS keys are stored under their
 * full path ("/volume/bucket/key"), while FSO keys are stored under an
 * object-ID path ("/volumeId/bucketId/parentId/fileName"), so the
 * user-supplied prefix is translated before seeking the FSO table.
 */
@Path("/insights")
@Produces(MediaType.APPLICATION_JSON)
@AdminOnly
public class OMDBInsightSearchEndpoint {

  private static final Logger LOG =
      LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class);

  @Inject
  private ContainerEndpoint containerEndpoint;
  @Inject
  private ReconContainerMetadataManager reconContainerMetadataManager;

  private final OzoneStorageContainerManager reconSCM;
  private final ReconOMMetadataManager omMetadataManager;
  private final ReconContainerManager containerManager;
  private final GlobalStatsDao globalStatsDao;
  private final ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;

  @Inject
  public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM,
                                   ReconOMMetadataManager omMetadataManager,
                                   GlobalStatsDao globalStatsDao,
                                   ReconNamespaceSummaryManagerImpl
                                       reconNamespaceSummaryManager) {
    this.reconSCM = reconSCM;
    this.containerManager =
        (ReconContainerManager) reconSCM.getContainerManager();
    this.omMetadataManager = omMetadataManager;
    this.globalStatsDao = globalStatsDao;
    this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
  }

  /**
   * Searches the open key tables (LEGACY and FSO layouts) for keys whose
   * DB key starts with the given prefix.
   *
   * @param searchPrefix the path prefix to search for; required.
   * @param limit        maximum number of keys to return (default 10).
   * @return 200 with the matched {@link OmKeyInfo} list, 404 when nothing
   *         matches, 400 when the prefix is missing, 500 on internal errors.
   * @throws IOException if the FSO prefix conversion fails irrecoverably.
   */
  @GET
  @Path("/openKeys/search")
  public Response searchOpenKeys(
      @QueryParam("searchPrefix") String searchPrefix,
      @DefaultValue("10") @QueryParam("limit") int limit)
      throws IOException {

    if (searchPrefix == null || searchPrefix.isEmpty()) {
      return createBadRequestResponse(
          "searchPrefix query parameter is required.");
    }

    List<OmKeyInfo> matchedKeys = new ArrayList<>();

    for (BucketLayout layout : Arrays.asList(
        BucketLayout.LEGACY, BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
      if (matchedKeys.size() >= limit) {
        // Limit already satisfied by an earlier layout; no need to seek again.
        break;
      }

      // FSO keys are stored under object-ID paths; translate the user
      // prefix. Use a local so the caller-visible prefix (used in the
      // 404 path below) is never mutated.
      String seekPrefix = layout == BucketLayout.FILE_SYSTEM_OPTIMIZED
          ? convertToObjectPath(searchPrefix)
          : searchPrefix;

      Table<String, OmKeyInfo> openKeyTable =
          omMetadataManager.getOpenKeyTable(layout);
      try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
               keyIter = openKeyTable.iterator()) {
        keyIter.seek(seekPrefix);
        while (keyIter.hasNext() && matchedKeys.size() < limit) {
          Table.KeyValue<String, OmKeyInfo> entry = keyIter.next();
          // Keys are sorted, so the first non-matching key ends the range.
          if (!entry.getKey().startsWith(seekPrefix)) {
            break;
          }
          matchedKeys.add(entry.getValue());
        }
      } catch (IOException | RuntimeException e) {
        // Do NOT mask unexpected failures as "no matches" (404); report
        // them as a server-side error so callers can distinguish the cases.
        LOG.error("Error searching open keys with prefix {}", searchPrefix, e);
        return createInternalServerErrorResponse(
            "Error searching open keys for the specified search prefix.");
      }
    }

    if (matchedKeys.isEmpty()) {
      return noMatchedKeysResponse(searchPrefix);
    }
    return Response.ok(matchedKeys).build();
  }

  /**
   * Converts a path-style key prefix to the object-ID path used by FSO
   * buckets in the OpenFileTable, e.g. "/volumeId/bucketId/parentId/fileName".
   *
   * <p>Falls back to returning the original prefix when the volume, bucket or
   * intermediate directory cannot be resolved (the subsequent seek will then
   * simply find no FSO matches).
   *
   * @param prevKeyPrefix the path-style key prefix to convert.
   * @return the object-ID path for the given prefix, or the original prefix
   *         on resolution failure; "" for an empty input.
   * @throws IOException declared for interface compatibility; resolution
   *         failures are handled internally.
   */
  public String convertToObjectPath(String prevKeyPrefix) throws IOException {
    if (prevKeyPrefix.isEmpty()) {
      return "";
    }

    try {
      String[] names = parseRequestPath(normalizePath(prevKeyPrefix));

      // Resolve the volume ID.
      String volumeName = names[0];
      String volumeKey = omMetadataManager.getVolumeKey(volumeName);
      long volumeId = omMetadataManager.getVolumeTable()
          .getSkipCache(volumeKey).getObjectID();
      if (names.length == 1) {
        return constructObjectPathWithPrefix(volumeId);
      }

      // Resolve the bucket ID.
      String bucketName = names[1];
      String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
      OmBucketInfo bucketInfo =
          omMetadataManager.getBucketTable().getSkipCache(bucketKey);
      long bucketId = bucketInfo.getObjectID();
      if (names.length == 2) {
        return constructObjectPathWithPrefix(volumeId, bucketId);
      }

      // Resolve the immediate parent directory ID, then append the leaf name.
      BucketHandler handler =
          getBucketHandler(reconNamespaceSummaryManager, omMetadataManager,
              reconSCM, bucketInfo);
      long parentId = getParentId(handler, names, bucketName, bucketId);
      String keyName = names[names.length - 1];
      return constructObjectPathWithPrefix(volumeId, bucketId, parentId)
          + OM_KEY_PREFIX + keyName;
    } catch (IOException e) {
      LOG.error("Error converting key prefix to object path: {}",
          prevKeyPrefix, e);
      return prevKeyPrefix; // Fall back to the original prefix.
    } catch (RuntimeException e) {
      // e.g. NPE when the volume or bucket does not exist in the DB.
      LOG.error("Unexpected error during conversion: {}", prevKeyPrefix, e);
      return prevKeyPrefix;
    }
  }

  /**
   * Resolves the object ID of the immediate parent directory of the leaf
   * element in {@code names}. When the parent is the bucket itself
   * (a "/volume/bucket/key" path), the bucket ID is returned directly.
   */
  private long getParentId(BucketHandler handler, String[] names,
                           String bucketName, long bucketId)
      throws IOException {
    String parentName = names[names.length - 2];
    if (bucketName.equals(parentName) && names.length == 3) {
      return bucketId;
    }
    return handler.getDirObjectId(names, names.length - 1);
  }

  /**
   * Joins the given object IDs into an OM object path, e.g. "/100/200/300".
   *
   * @param ids the object IDs, in path order.
   * @return the constructed object path.
   */
  private String constructObjectPathWithPrefix(long... ids) {
    StringBuilder pathBuilder = new StringBuilder();
    for (long id : ids) {
      pathBuilder.append(OM_KEY_PREFIX).append(id);
    }
    return pathBuilder.toString();
  }

  /**
   * Builds a 404 response indicating that no keys matched the search prefix.
   *
   * @param searchPrefix the prefix that produced no matches (kept in the
   *                     signature for future message enrichment).
   * @return a NOT_FOUND response with a JSON message body.
   */
  private Response noMatchedKeysResponse(String searchPrefix) {
    String message =
        "{\"message\": \"No keys exist for the specified search prefix.\"}";
    return Response.status(Response.Status.NOT_FOUND)
        .entity(message)
        .type(MediaType.APPLICATION_JSON)
        .build();
  }

  /**
   * Builds a 400 response with the given message as a JSON body.
   *
   * @param message the message to include in the response body.
   * @return a BAD_REQUEST response.
   */
  private Response createBadRequestResponse(String message) {
    String jsonResponse = String.format("{\"message\": \"%s\"}", message);
    return Response.status(Response.Status.BAD_REQUEST)
        .entity(jsonResponse)
        .type(MediaType.APPLICATION_JSON)
        .build();
  }

  /**
   * Builds a 500 response with the given message as a JSON body.
   *
   * @param message the message to include in the response body.
   * @return an INTERNAL_SERVER_ERROR response.
   */
  private Response createInternalServerErrorResponse(String message) {
    String jsonResponse = String.format("{\"message\": \"%s\"}", message);
    return Response.status(Response.Status.INTERNAL_SERVER_ERROR)
        .entity(jsonResponse)
        .type(MediaType.APPLICATION_JSON)
        .build();
  }

  /** Normalizes a user path and prepends the OM key prefix separator. */
  private static String normalizePath(String path) {
    return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false);
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ozone.recon.api;

import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest;
import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import javax.ws.rs.core.Response;

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;

import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;

/**
 * Tests for {@link OMDBInsightSearchEndpoint}: open-key search across
 * bucket layouts and prefix-to-object-path conversion.
 */
public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest {

  @TempDir
  private Path temporaryFolder;

  private OMMetadataManager omMetadataManager;
  private ReconOMMetadataManager reconOMMetadataManager;
  private OMDBInsightSearchEndpoint omdbInsightSearchEndpoint;
  private Random random = new Random();
  // Object IDs handed out so far; used to guarantee uniqueness per test run.
  private Set<Long> generatedIds = new HashSet<>();

  /** Returns a random long not previously returned by this instance. */
  private long generateUniqueRandomLong() {
    long newValue;
    do {
      newValue = random.nextLong();
    } while (!generatedIds.add(newValue));
    return newValue;
  }

  @BeforeEach
  public void setUp() throws Exception {
    omMetadataManager = initializeNewOmMetadataManager(
        Files.createDirectory(temporaryFolder.resolve(
            "JunitOmMetadata")).toFile());
    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
        Files.createDirectory(temporaryFolder.resolve(
            "JunitOmMetadataTest")).toFile());
    ReconTestInjector reconTestInjector =
        new ReconTestInjector.Builder(temporaryFolder.toFile())
            .withReconSqlDb()
            .withReconOm(reconOMMetadataManager)
            .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class))
            .addBinding(OzoneStorageContainerManager.class,
                ReconStorageContainerManagerFacade.class)
            .withContainerDB()
            .addBinding(StorageContainerServiceProvider.class,
                mock(StorageContainerServiceProviderImpl.class))
            .addBinding(OMDBInsightEndpoint.class)
            .addBinding(ContainerHealthSchemaManager.class)
            .build();
    omdbInsightSearchEndpoint = reconTestInjector.getInstance(
        OMDBInsightSearchEndpoint.class);
  }

  @Test
  public void testSearchOpenKeys() throws Exception {
    // Seed 3 open keys in 'bucketOne' and 2 in 'bucketTwo'.
    for (int i = 1; i <= 3; i++) {
      OmKeyInfo omKeyInfo =
          getOmKeyInfo("sampleVol", "bucketOne", "key_" + i, true);
      String keyPath = String.format("/sampleVol/%s/key_%d", "bucketOne", i);
      reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
          .put(keyPath, omKeyInfo);
    }
    for (int i = 1; i <= 2; i++) {
      OmKeyInfo omKeyInfo =
          getOmKeyInfo("sampleVol", "bucketTwo", "key_" + i, true);
      String keyPath = String.format("/sampleVol/%s/key_%d", "bucketTwo", i);
      reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
          .put(keyPath, omKeyInfo);
    }

    // Search 'bucketOne' and verify all 3 keys are returned.
    String searchPrefixBucketOne = "/sampleVol/bucketOne/";
    Response responseBucketOne =
        omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketOne, 10);
    assertEquals(Response.Status.OK.getStatusCode(),
        responseBucketOne.getStatus());
    List<OmKeyInfo> searchResultsBucketOne =
        (List<OmKeyInfo>) responseBucketOne.getEntity();
    assertNotNull(searchResultsBucketOne);
    assertEquals(3, searchResultsBucketOne.size());
    searchResultsBucketOne.forEach(keyInfo -> {
      assertTrue(keyInfo.getKeyName().startsWith("key_"));
      assertTrue(keyInfo.getVolumeName().startsWith("sampleVol"));
      assertTrue(keyInfo.getBucketName().startsWith("bucketOne"));
    });

    // Search 'bucketTwo' and verify both keys are returned.
    String searchPrefixBucketTwo = "/sampleVol/bucketTwo/";
    Response responseBucketTwo =
        omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketTwo, 10);
    assertEquals(Response.Status.OK.getStatusCode(),
        responseBucketTwo.getStatus());
    List<OmKeyInfo> searchResultsBucketTwo =
        (List<OmKeyInfo>) responseBucketTwo.getEntity();
    assertNotNull(searchResultsBucketTwo);
    assertEquals(2, searchResultsBucketTwo.size());
    searchResultsBucketTwo.forEach(keyInfo -> {
      assertTrue(keyInfo.getKeyName().startsWith("key_"));
      assertTrue(keyInfo.getVolumeName().startsWith("sampleVol"));
      assertTrue(keyInfo.getBucketName().startsWith("bucketTwo"));
    });
  }

  @Test
  public void testConvertToObjectPathForEmptyPrefix() throws Exception {
    String result = omdbInsightSearchEndpoint.convertToObjectPath("");
    assertEquals("", result, "Expected an empty string for empty input");
  }

  @Test
  public void testConvertToObjectPathForVolumeOnly() throws Exception {
    setupVolume("vol1", 100L);

    String result = omdbInsightSearchEndpoint.convertToObjectPath("/vol1");
    assertEquals("/100", result, "Incorrect conversion for volume only path");
  }

  @Test
  public void testConvertToObjectPathForVolumeAndBucket() throws Exception {
    setupVolume("vol1", 100L);
    setupBucket("vol1", "bucketOne", 200L, BucketLayout.LEGACY);

    String result =
        omdbInsightSearchEndpoint.convertToObjectPath("vol1/bucketOne");
    assertEquals("/100/200", result,
        "Incorrect conversion for volume and bucket path");
  }

  @Test
  public void testSearchOpenKeysWithDifferentBucketLayouts() throws Exception {
    // LEGACY layout: keys are stored under their full path.
    OmKeyInfo legacyKeyInfo =
        getOmKeyInfo("sampleVol", "legacyBucket", "legacy_key_1", true);
    String legacyKeyPath =
        String.format("/sampleVol/%s/%s", "legacyBucket", "legacy_key_1");
    reconOMMetadataManager.getOpenKeyTable(BucketLayout.LEGACY)
        .put(legacyKeyPath, legacyKeyInfo);

    // FSO layout: keys are stored under object-ID paths.
    OmKeyInfo fsoKeyInfo =
        getOmKeyInfo("sampleVol", "fsoBucket", "fso_key_1", true);
    setupBucket("sampleVol", "fsoBucket", 200L,
        BucketLayout.FILE_SYSTEM_OPTIMIZED);
    long bucketId = 200L;
    long volumeId = 0L;
    String fsoKeyPath = String.format("/%s/%s/%s", volumeId, bucketId,
        fsoKeyInfo.getObjectID());
    reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED)
        .put(fsoKeyPath, fsoKeyInfo);

    // Verify search under the LEGACY bucket.
    String searchPrefixLegacy = "/sampleVol/legacyBucket/";
    Response responseLegacy =
        omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixLegacy, 10);
    assertEquals(Response.Status.OK.getStatusCode(),
        responseLegacy.getStatus());
    List<OmKeyInfo> searchResultsLegacy =
        (List<OmKeyInfo>) responseLegacy.getEntity();
    assertNotNull(searchResultsLegacy);
    assertEquals(1, searchResultsLegacy.size());
    assertTrue(
        searchResultsLegacy.get(0).getKeyName().startsWith("legacy_key_"));
    assertEquals("legacyBucket", searchResultsLegacy.get(0).getBucketName());

    // Verify search under the FSO bucket.
    String searchPrefixFso = "/sampleVol/fsoBucket/";
    Response responseFso =
        omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixFso, 10);
    assertEquals(Response.Status.OK.getStatusCode(), responseFso.getStatus());
    List<OmKeyInfo> searchResultsFso =
        (List<OmKeyInfo>) responseFso.getEntity();
    assertNotNull(searchResultsFso);
    assertEquals(1, searchResultsFso.size());
    assertTrue(searchResultsFso.get(0).getKeyName().startsWith("fso_key_"));
    assertEquals("fsoBucket", searchResultsFso.get(0).getBucketName());

    // Verify a volume-only prefix matches keys from both layouts.
    String searchPrefixVolume = "/sampleVol/";
    Response responseVolume =
        omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixVolume, 10);
    assertEquals(Response.Status.OK.getStatusCode(),
        responseVolume.getStatus());
    List<OmKeyInfo> searchResultsVolume =
        (List<OmKeyInfo>) responseVolume.getEntity();
    assertNotNull(searchResultsVolume);
    assertEquals(2, searchResultsVolume.size());
  }

  /** Builds an {@link OmKeyInfo} with a unique object ID for tests. */
  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
                                 String keyName, boolean isFile) {
    return new OmKeyInfo.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setFile(isFile)
        .setObjectID(generateUniqueRandomLong())
        .setReplicationConfig(StandaloneReplicationConfig
            .getInstance(HddsProtos.ReplicationFactor.ONE))
        .setDataSize(random.nextLong())
        .build();
  }

  /** Inserts a volume row with the given name and object ID. */
  public void setupVolume(String volumeName, long volumeId) throws Exception {
    Table<String, OmVolumeArgs> volumeTable =
        reconOMMetadataManager.getVolumeTable();

    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
        .setVolume(volumeName)
        .setAdminName("TestUser")
        .setOwnerName("TestUser")
        .setObjectID(volumeId)
        .build();
    volumeTable.put(reconOMMetadataManager.getVolumeKey(volumeName),
        omVolumeArgs);
  }

  /** Inserts a bucket row with the given name, object ID and layout. */
  private void setupBucket(String volumeName, String bucketName, long bucketId,
                           BucketLayout layout)
      throws Exception {
    Table<String, OmBucketInfo> bucketTable =
        reconOMMetadataManager.getBucketTable();

    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setObjectID(bucketId)
        .setBucketLayout(layout)
        .build();

    String bucketKey =
        reconOMMetadataManager.getBucketKey(volumeName, bucketName);
    bucketTable.put(bucketKey, omBucketInfo);
  }
}
--- .../recon/api/OMDBInsightSearchEndpoint.java | 18 ++++++++++++++++++ .../api/TestOMDBInsightSearchEndpoint.java | 18 ++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 546c3f1e8d8e..92af4ebd9812 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.ozone.recon.api; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 91fcfcdc841e..154b56898e3f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.ozone.recon.api; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; From 9b8b7b7e228f6522c80f300c0606b9b59e641c6a Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 18 Feb 2024 16:06:04 +0530 Subject: [PATCH 03/32] Removed unecessary instances from injector --- .../recon/api/TestOMDBInsightSearchEndpoint.java | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 154b56898e3f..f08d758cbb91 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -59,7 +59,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; -import static org.mockito.MockitoAnnotations.openMocks; /** * Test for OMDBInsightSearchEndpoint. 
@@ -67,15 +66,10 @@ public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - private OzoneStorageContainerManager ozoneStorageContainerManager; - private ReconContainerMetadataManager reconContainerMetadataManager; private OMMetadataManager omMetadataManager; - private ReconPipelineManager reconPipelineManager; private ReconOMMetadataManager reconOMMetadataManager; private OMDBInsightSearchEndpoint omdbInsightSearchEndpoint; - private Pipeline pipeline; private Random random = new Random(); - private OzoneConfiguration ozoneConfiguration; private Set generatedIds = new HashSet<>(); public TestOMDBInsightSearchEndpoint() { super(); @@ -112,18 +106,8 @@ public void setUp() throws Exception { .addBinding(OMDBInsightEndpoint.class) .addBinding(ContainerHealthSchemaManager.class) .build(); - reconContainerMetadataManager = - reconTestInjector.getInstance(ReconContainerMetadataManager.class); omdbInsightSearchEndpoint = reconTestInjector.getInstance( OMDBInsightSearchEndpoint.class); - ozoneStorageContainerManager = - reconTestInjector.getInstance(OzoneStorageContainerManager.class); - reconPipelineManager = (ReconPipelineManager) - ozoneStorageContainerManager.getPipelineManager(); - pipeline = getRandomPipeline(); - reconPipelineManager.addPipeline(pipeline); - ozoneConfiguration = new OzoneConfiguration(); - openMocks(this); } @Test From 4f60c61b0fb9a2b2733394c73beac265e3a777ce Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 18 Feb 2024 17:10:09 +0530 Subject: [PATCH 04/32] Fixed checkstyle and findBugs --- .../ozone/recon/api/OMDBInsightSearchEndpoint.java | 2 +- .../ozone/recon/api/TestOMDBInsightSearchEndpoint.java | 9 ++------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 
92af4ebd9812..3275e57083c6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -131,7 +131,7 @@ public Response searchOpenKeys( // Add it to the list of matched keys. matchedKeys.add(omKeyInfo); } - } catch (Exception e) { + } catch (NullPointerException e) { return noMatchedKeysResponse(searchPrefix); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index f08d758cbb91..8eafd3ea8ec0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -19,9 +19,7 @@ package org.apache.hadoop.ozone.recon.api; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -33,9 +31,7 @@ import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; -import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import 
org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; @@ -53,7 +49,6 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -211,8 +206,8 @@ public void testSearchOpenKeysWithDifferentBucketLayouts() throws Exception { long fsoKeyObjectId = fsoKeyInfo.getObjectID(); long bucketId = 200L; long volumeId = 0L; - String fsoKeyPath = String.format("/%s/%s/%s", volumeId, bucketId, - fsoKeyInfo.getObjectID()); + String fsoKeyPath = + String.format("/%s/%s/%s", volumeId, bucketId, fsoKeyObjectId); reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) .put(fsoKeyPath, fsoKeyInfo); From 7e0838c82478a646336c798091913744fbee8adc Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 10 Mar 2024 01:40:05 +0530 Subject: [PATCH 05/32] Added prefix based searching for FSO open keys --- .../recon/api/OMDBInsightSearchEndpoint.java | 273 ++++++++++++++---- .../recon/api/handlers/EntityHandler.java | 2 +- 2 files changed, 217 insertions(+), 58 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 3275e57083c6..28acd9282fdf 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -18,14 
+18,16 @@ package org.apache.hadoop.ozone.recon.api; +import org.antlr.v4.runtime.misc.Pair; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.*; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; @@ -44,11 +46,12 @@ import javax.ws.rs.core.Response; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; +import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; +import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; /** @@ -97,74 +100,236 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, */ @GET @Path("/openKeys/search") - public Response searchOpenKeys( - @QueryParam("searchPrefix") String searchPrefix, - @DefaultValue("10") @QueryParam("limit") int limit) + public Response searchOpenKeys(@QueryParam("searchPrefix") String searchPrefix, + @DefaultValue("10") @QueryParam("limit") int limit) throws IOException { + if (searchPrefix == null || searchPrefix.trim().isEmpty()) { + return 
createBadRequestResponse("The searchPrefix query parameter is required."); + } + + KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); + List fsoKeyInfoList = new ArrayList<>(); + List nonFsoKeyInfoList = new ArrayList<>(); + long replicatedTotal = 0; + long unreplicatedTotal = 0; + + // Fetch keys from OBS layout + List obsKeys = searchOpenKeysInOBS(searchPrefix, limit); + for (OmKeyInfo keyInfo : obsKeys) { + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(keyInfo); + nonFsoKeyInfoList.add(keyEntityInfo); // Add to non-FSO list + replicatedTotal += keyInfo.getReplicatedSize(); + unreplicatedTotal += keyInfo.getDataSize() - keyInfo.getReplicatedSize(); + } - if (searchPrefix == null || searchPrefix.isEmpty()) { - return createBadRequestResponse( - "searchPrefix query parameter is required."); + // Fetch keys from FSO layout, if the limit is not yet reached + List fsoKeys = searchOpenKeysInFSO(searchPrefix, limit); + for (OmKeyInfo keyInfo : fsoKeys) { + KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(keyInfo); + fsoKeyInfoList.add(keyEntityInfo); // Add to FSO list + replicatedTotal += keyInfo.getReplicatedSize(); + unreplicatedTotal += keyInfo.getDataSize(); } + + // Set the fetched keys and totals in the response + insightResponse.setFsoKeyInfoList(fsoKeyInfoList); + insightResponse.setNonFSOKeyInfoList(nonFsoKeyInfoList); + insightResponse.setReplicatedDataSize(replicatedTotal); + insightResponse.setUnreplicatedDataSize(unreplicatedTotal); + + return Response.ok(insightResponse).build(); + } + + /** + * Creates a KeyEntityInfo object from an OmKeyInfo object. + * + * @param keyInfo The OmKeyInfo object to create the KeyEntityInfo from. + * @return The KeyEntityInfo object created from the OmKeyInfo object. 
+ */ + private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(OmKeyInfo keyInfo) { + KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setKey(keyInfo.getKeyName()); + keyEntityInfo.setPath(keyInfo.getKeyName()); // Assuming path is the same as key name + keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); + keyEntityInfo.setSize(keyInfo.getDataSize()); + keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); + keyEntityInfo.setReplicationConfig(keyInfo.getReplicationConfig()); + return keyEntityInfo; + } + + public List searchOpenKeysInOBS(String searchPrefix, int limit) + throws IOException { + List matchedKeys = new ArrayList<>(); + Table openKeyTable = + omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); - for (BucketLayout layout : Arrays.asList( - BucketLayout.LEGACY, BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + try ( + TableIterator> + keyIter = openKeyTable.iterator()) { + keyIter.seek(searchPrefix); + while (keyIter.hasNext() && matchedKeys.size() < limit) { + Table.KeyValue entry = keyIter.next(); + String key = entry.getKey(); + if (!key.startsWith(searchPrefix)) { + break; // Exit the loop if the key no longer matches the prefix + } + matchedKeys.add(entry.getValue()); + } + } catch (NullPointerException | IOException exception) { + LOG.error("Error retrieving keys from openFileTable for path: {} ", + searchPrefix, exception); + } + + return matchedKeys; + } + + public List searchOpenKeysInFSO(String searchPrefix, int limit) + throws IOException { + List matchedKeys = new ArrayList<>(); + // Convert the search prefix to an object path for FSO buckets + String searchPrefixObjectPath = convertToObjectPath(searchPrefix); + String[] names = parseRequestPath(searchPrefixObjectPath); + + // If names.length > 2, then the search prefix is at the volume or bucket level hence + // no need to find parent or extract id's or find subpaths as the openFileTable is + // suitable for volume and bucket level search + if (names.length > 2) { 
+ // Fetch the parent ID to search for + long parentId = Long.parseLong(names[names.length - 1]); + + // Fetch the nameSpaceSummary for the parent ID + NSSummary parentSummary = + reconNamespaceSummaryManager.getNSSummary(parentId); + if (parentSummary == null) { + return matchedKeys; + } + List subPaths = new ArrayList<>(); + // Add the initial search prefix object path because it can have both openFiles + // and sub-directories with openFiles + subPaths.add(searchPrefixObjectPath); - Table openKeyTable = - omMetadataManager.getOpenKeyTable(layout); - try ( - TableIterator> - keyIter = openKeyTable.iterator()) { - if (layout == BucketLayout.FILE_SYSTEM_OPTIMIZED) { - searchPrefix = convertToObjectPath(searchPrefix); + // Recursively gather all subpaths + gatherSubPaths(parentId, subPaths, names); + + // Iterate over the subpaths and retrieve the open files + for (String subPath : subPaths) { + matchedKeys.addAll( + retrieveKeysFromOpenFileTable(subPath, limit - matchedKeys.size())); + if (matchedKeys.size() >= limit) { + break; } - keyIter.seek(searchPrefix); - while (keyIter.hasNext() && matchedKeys.size() < limit) { - Table.KeyValue entry = keyIter.next(); - String key = entry.getKey(); - // Break if the key no longer matches the prefix - if (!key.startsWith(searchPrefix)) { - break; - } - OmKeyInfo omKeyInfo = entry.getValue(); - // Add it to the list of matched keys. - matchedKeys.add(omKeyInfo); + } + return matchedKeys; + } + + // Iterate over for bucket and volume level search + matchedKeys.addAll(retrieveKeysFromOpenFileTable(searchPrefixObjectPath, + limit - matchedKeys.size())); + return matchedKeys; + } + + + private List retrieveKeysFromOpenFileTable(String subPath, + int limit) + throws IOException { + + // Iterate the file table. 
+ Table table = omMetadataManager.getFileTable(); + TableIterator> iterator = table.iterator(); + while (iterator.hasNext()) { + OmKeyInfo keyInfo = iterator.next().getValue(); + System.out.println("Key: " + iterator.next().getKey() + " Size: " + keyInfo.getDataSize()); + } + + + List matchedKeys = new ArrayList<>(); + Table openFileTable = + omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + try ( + TableIterator> keyIter = + openFileTable.iterator()) { + keyIter.seek(subPath); + while (keyIter.hasNext() && matchedKeys.size() < limit) { + Table.KeyValue entry = keyIter.next(); + String key = entry.getKey(); + if (!key.startsWith(subPath)) { + break; // Exit the loop if the key no longer matches the prefix } - } catch (NullPointerException e) { - return noMatchedKeysResponse(searchPrefix); + matchedKeys.add(entry.getValue()); } + } catch (NullPointerException | IOException exception) { + LOG.error("Error retrieving keys from openFileTable for path: {} ", subPath, exception); } - if (matchedKeys.isEmpty()) { - return noMatchedKeysResponse(searchPrefix); - } else { - return Response.ok(matchedKeys).build(); + return matchedKeys; + } + + + /** + * Finds all subdirectories under a parent directory in an FSO bucket. It builds + * a list of paths for these subdirectories. These sub-directories are then used + * to search for open files in the openFileTable. + * + * How it works: + * - Starts from a parent directory identified by parentId. + * - Looks through all child directories of this parent. + * - For each child, it creates a path that starts with volumeID/bucketID/parentId, + * following our openFileTable format + * - Adds these paths to a list and explores each child further for more subdirectories. + * + * @param parentId The ID of the directory we start exploring from. + * @param subPaths A list where we collect paths to all subdirectories. 
+ * @param names An array with at least two elements: the first is volumeID and the second is bucketID. + * These are used to start each path. + * @throws IOException If there are problems accessing directory information. + + */ + private void gatherSubPaths(long parentId, List subPaths, String[] names) throws IOException { + // Fetch the NSSummary object for parentId + NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); + if (parentSummary == null) return; + + Set childDirIds = parentSummary.getChildDir(); + for (Long childId : childDirIds) { + // Fetch the NSSummary for each child directory + NSSummary childSummary = reconNamespaceSummaryManager.getNSSummary(childId); + if (childSummary != null) { + long volumeID = Long.parseLong(names[0]); + long bucketID = Long.parseLong(names[1]); + String subPath = constructObjectPathWithPrefix(volumeID, bucketID, childId); + // Add to subPaths + subPaths.add(subPath); + // Recurse into this child directory + gatherSubPaths(childId, subPaths, names); + } } } + /** - * Converts a given key prefix to an object path for FSO buckets. - * The conversion is necessary because keys in FSO buckets are stored in a - * object format in the OpenFileTable, - * e.g., "/volumeId/bucketId/parentId/fileName/id -> KeyInfo". + * Converts a key prefix into an object path for FSO buckets, using IDs. * - * @param prevKeyPrefix The key prefix to convert. - * @return The object path for the given key prefix. - * @throws IOException if an error occurs during conversion. + * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into + * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names + * with their corresponding IDs. It simplifies database queries for FSO bucket operations. + * + * @param prevKeyPrefix The path to be converted, not including key or directory names/IDs. + * @return The object path as "/volumeID/bucketID/ParentId/". 
+ * @throws IOException If database access fails. */ public String convertToObjectPath(String prevKeyPrefix) throws IOException { if (prevKeyPrefix.isEmpty()) { return ""; } - // Fetch the volumeID try { String[] names = parseRequestPath(normalizePath(prevKeyPrefix)); + + // Fetch the volumeID String volumeName = names[0]; String volumeKey = omMetadataManager.getVolumeKey(volumeName); - long volumeId = - omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); - + long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey) + .getObjectID(); if (names.length == 1) { return constructObjectPathWithPrefix(volumeId); } @@ -175,19 +340,17 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); long bucketId = bucketInfo.getObjectID(); - if (names.length == 2) { return constructObjectPathWithPrefix(volumeId, bucketId); } - // Fetch the intermediate parentID + // Fetch the immediate parentID which could be a directory or the bucket itself BucketHandler handler = getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - long parentId = getParentId(handler, names, bucketName, bucketId); - String keyName = names[names.length - 1]; - return constructObjectPathWithPrefix(volumeId, bucketId, parentId) + - OM_KEY_PREFIX + keyName; + OmDirectoryInfo dirInfo = handler.getDirInfo(names); + return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()); + } catch (IOException e) { LOG.error("Error converting key prefix to object path: {}", prevKeyPrefix, e); @@ -251,8 +414,4 @@ private Response createBadRequestResponse(String message) { .build(); } - private static String normalizePath(String path) { - return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); - } - } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index d12c7b6545ac..4ed5a11329c8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -256,7 +256,7 @@ public static String[] parseRequestPath(String path) { return names; } - private static String normalizePath(String path) { + public static String normalizePath(String path) { return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); } } From 0dacee36ef64c5febc2b5effcd73bfdc160e23d4 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 10 Mar 2024 12:19:28 +0530 Subject: [PATCH 06/32] Gave a more structured search response --- .../recon/api/OMDBInsightSearchEndpoint.java | 158 +++++++++--------- 1 file changed, 81 insertions(+), 77 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 28acd9282fdf..0c7a80af6f16 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -45,9 +45,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; +import java.util.*; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; @@ -62,30 +60,23 @@ @AdminOnly public class OMDBInsightSearchEndpoint { - @Inject - private ContainerEndpoint containerEndpoint; private OzoneStorageContainerManager reconSCM; - @Inject - private ReconContainerMetadataManager reconContainerMetadataManager; private 
final ReconOMMetadataManager omMetadataManager; private final ReconContainerManager containerManager; private static final Logger LOG = LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class); - private final GlobalStatsDao globalStatsDao; private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; @Inject public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, - ReconOMMetadataManager omMetadataManager, - GlobalStatsDao globalStatsDao, - ReconNamespaceSummaryManagerImpl - reconNamespaceSummaryManager) { + ReconOMMetadataManager omMetadataManager, + ReconNamespaceSummaryManagerImpl + reconNamespaceSummaryManager) { this.reconSCM = reconSCM; this.containerManager = (ReconContainerManager) reconSCM.getContainerManager(); this.omMetadataManager = omMetadataManager; - this.globalStatsDao = globalStatsDao; this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; } @@ -100,40 +91,42 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, */ @GET @Path("/openKeys/search") - public Response searchOpenKeys(@QueryParam("searchPrefix") String searchPrefix, - @DefaultValue("10") @QueryParam("limit") int limit) + public Response searchOpenKeys( + @QueryParam("searchPrefix") String searchPrefix, + @DefaultValue("10") @QueryParam("limit") int limit) throws IOException { if (searchPrefix == null || searchPrefix.trim().isEmpty()) { - return createBadRequestResponse("The searchPrefix query parameter is required."); + return createBadRequestResponse( + "The searchPrefix query parameter is required."); } KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); - List fsoKeyInfoList = new ArrayList<>(); - List nonFsoKeyInfoList = new ArrayList<>(); long replicatedTotal = 0; long unreplicatedTotal = 0; - // Fetch keys from OBS layout - List obsKeys = searchOpenKeysInOBS(searchPrefix, limit); - for (OmKeyInfo keyInfo : obsKeys) { - KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(keyInfo); - 
nonFsoKeyInfoList.add(keyEntityInfo); // Add to non-FSO list - replicatedTotal += keyInfo.getReplicatedSize(); - unreplicatedTotal += keyInfo.getDataSize() - keyInfo.getReplicatedSize(); + // Fetch keys from OBS layout and convert them into KeyEntityInfo objects + Map obsKeys = searchOpenKeysInOBS(searchPrefix, limit); + for (Map.Entry entry : obsKeys.entrySet()) { + KeyEntityInfo keyEntityInfo = + createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + insightResponse.getNonFSOKeyInfoList() + .add(keyEntityInfo); // Add to non-FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); } // Fetch keys from FSO layout, if the limit is not yet reached - List fsoKeys = searchOpenKeysInFSO(searchPrefix, limit); - for (OmKeyInfo keyInfo : fsoKeys) { - KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(keyInfo); - fsoKeyInfoList.add(keyEntityInfo); // Add to FSO list - replicatedTotal += keyInfo.getReplicatedSize(); - unreplicatedTotal += keyInfo.getDataSize(); + Map fsoKeys = + searchOpenKeysInFSO(searchPrefix, limit - obsKeys.size()); + for (Map.Entry entry : fsoKeys.entrySet()) { + KeyEntityInfo keyEntityInfo = + createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + insightResponse.getFsoKeyInfoList().add(keyEntityInfo); // Add to FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); } - // Set the fetched keys and totals in the response - insightResponse.setFsoKeyInfoList(fsoKeyInfoList); - insightResponse.setNonFSOKeyInfoList(nonFsoKeyInfoList); + // Set the aggregated totals in the response insightResponse.setReplicatedDataSize(replicatedTotal); insightResponse.setUnreplicatedDataSize(unreplicatedTotal); @@ -141,15 +134,18 @@ public Response searchOpenKeys(@QueryParam("searchPrefix") String searchPrefix, } /** - * Creates a KeyEntityInfo object from an OmKeyInfo object. 
+ * Creates a KeyEntityInfo object from an OmKeyInfo object and the corresponding key. * + * @param dbKey The key in the database corresponding to the OmKeyInfo object. * @param keyInfo The OmKeyInfo object to create the KeyEntityInfo from. - * @return The KeyEntityInfo object created from the OmKeyInfo object. + * @return The KeyEntityInfo object created from the OmKeyInfo object and the key. */ - private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(OmKeyInfo keyInfo) { + private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, + OmKeyInfo keyInfo) { KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(keyInfo.getKeyName()); - keyEntityInfo.setPath(keyInfo.getKeyName()); // Assuming path is the same as key name + keyEntityInfo.setKey(dbKey); // Set the DB key + keyEntityInfo.setPath( + keyInfo.getKeyName()); // Assuming path is the same as key name keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); keyEntityInfo.setSize(keyInfo.getDataSize()); keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); @@ -157,10 +153,13 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(OmKeyInfo keyInfo) { return keyEntityInfo; } - public List searchOpenKeysInOBS(String searchPrefix, int limit) + + public Map searchOpenKeysInOBS(String searchPrefix, + int limit) throws IOException { - List matchedKeys = new ArrayList<>(); + Map matchedKeys = + new LinkedHashMap<>(); // Preserves the insertion order Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); @@ -170,23 +169,26 @@ public List searchOpenKeysInOBS(String searchPrefix, int limit) keyIter.seek(searchPrefix); while (keyIter.hasNext() && matchedKeys.size() < limit) { Table.KeyValue entry = keyIter.next(); - String key = entry.getKey(); - if (!key.startsWith(searchPrefix)) { + String dbKey = entry.getKey(); // Get the DB key + if (!dbKey.startsWith(searchPrefix)) { break; // Exit the loop if the key no longer matches the prefix } - 
matchedKeys.add(entry.getValue()); + // Add the DB key and OmKeyInfo object to the map + matchedKeys.put(dbKey, entry.getValue()); } } catch (NullPointerException | IOException exception) { - LOG.error("Error retrieving keys from openFileTable for path: {} ", + LOG.error("Error retrieving keys from openKeyTable for path: {}", searchPrefix, exception); } return matchedKeys; } - public List searchOpenKeysInFSO(String searchPrefix, int limit) + + public Map searchOpenKeysInFSO(String searchPrefix, + int limit) throws IOException { - List matchedKeys = new ArrayList<>(); + Map matchedKeys = new LinkedHashMap<>(); // Convert the search prefix to an object path for FSO buckets String searchPrefixObjectPath = convertToObjectPath(searchPrefix); String[] names = parseRequestPath(searchPrefixObjectPath); @@ -214,7 +216,7 @@ public List searchOpenKeysInFSO(String searchPrefix, int limit) // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { - matchedKeys.addAll( + matchedKeys.putAll( retrieveKeysFromOpenFileTable(subPath, limit - matchedKeys.size())); if (matchedKeys.size() >= limit) { break; @@ -224,42 +226,37 @@ public List searchOpenKeysInFSO(String searchPrefix, int limit) } // Iterate over for bucket and volume level search - matchedKeys.addAll(retrieveKeysFromOpenFileTable(searchPrefixObjectPath, + matchedKeys.putAll(retrieveKeysFromOpenFileTable(searchPrefixObjectPath, limit - matchedKeys.size())); return matchedKeys; } - private List retrieveKeysFromOpenFileTable(String subPath, - int limit) + private Map retrieveKeysFromOpenFileTable(String subPath, + int limit) throws IOException { - // Iterate the file table. 
- Table table = omMetadataManager.getFileTable(); - TableIterator> iterator = table.iterator(); - while (iterator.hasNext()) { - OmKeyInfo keyInfo = iterator.next().getValue(); - System.out.println("Key: " + iterator.next().getKey() + " Size: " + keyInfo.getDataSize()); - } - - - List matchedKeys = new ArrayList<>(); + Map matchedKeys = + new LinkedHashMap<>(); // Preserves the insertion order Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + try ( TableIterator> keyIter = openFileTable.iterator()) { keyIter.seek(subPath); while (keyIter.hasNext() && matchedKeys.size() < limit) { Table.KeyValue entry = keyIter.next(); - String key = entry.getKey(); - if (!key.startsWith(subPath)) { + String dbKey = entry.getKey(); // Get the DB key + if (!dbKey.startsWith(subPath)) { break; // Exit the loop if the key no longer matches the prefix } - matchedKeys.add(entry.getValue()); + // Add the DB key and OmKeyInfo object to the map + matchedKeys.put(dbKey, entry.getValue()); } } catch (NullPointerException | IOException exception) { - LOG.error("Error retrieving keys from openFileTable for path: {} ", subPath, exception); + LOG.error("Error retrieving keys from openFileTable for path: {}", + subPath, exception); } return matchedKeys; } @@ -269,34 +266,39 @@ private List retrieveKeysFromOpenFileTable(String subPath, * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. These sub-directories are then used * to search for open files in the openFileTable. - * + *

* How it works: * - Starts from a parent directory identified by parentId. * - Looks through all child directories of this parent. * - For each child, it creates a path that starts with volumeID/bucketID/parentId, - * following our openFileTable format + * following our openFileTable format * - Adds these paths to a list and explores each child further for more subdirectories. * * @param parentId The ID of the directory we start exploring from. * @param subPaths A list where we collect paths to all subdirectories. - * @param names An array with at least two elements: the first is volumeID and the second is bucketID. - * These are used to start each path. + * @param names An array with at least two elements: the first is volumeID and the second is bucketID. + * These are used to start each path. * @throws IOException If there are problems accessing directory information. - */ - private void gatherSubPaths(long parentId, List subPaths, String[] names) throws IOException { + private void gatherSubPaths(long parentId, List subPaths, + String[] names) throws IOException { // Fetch the NSSummary object for parentId - NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); - if (parentSummary == null) return; + NSSummary parentSummary = + reconNamespaceSummaryManager.getNSSummary(parentId); + if (parentSummary == null) { + return; + } Set childDirIds = parentSummary.getChildDir(); for (Long childId : childDirIds) { // Fetch the NSSummary for each child directory - NSSummary childSummary = reconNamespaceSummaryManager.getNSSummary(childId); + NSSummary childSummary = + reconNamespaceSummaryManager.getNSSummary(childId); if (childSummary != null) { long volumeID = Long.parseLong(names[0]); long bucketID = Long.parseLong(names[1]); - String subPath = constructObjectPathWithPrefix(volumeID, bucketID, childId); + String subPath = + constructObjectPathWithPrefix(volumeID, bucketID, childId); // Add to subPaths subPaths.add(subPath); // Recurse into this 
child directory @@ -308,7 +310,7 @@ private void gatherSubPaths(long parentId, List subPaths, String[] name /** * Converts a key prefix into an object path for FSO buckets, using IDs. - * + *

* This method transforms a user-provided path (e.g., "volume/bucket/dir1") into * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names * with their corresponding IDs. It simplifies database queries for FSO bucket operations. @@ -349,7 +351,8 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); OmDirectoryInfo dirInfo = handler.getDirInfo(names); - return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()); + return constructObjectPathWithPrefix(volumeId, bucketId, + dirInfo.getObjectID()); } catch (IOException e) { LOG.error("Error converting key prefix to object path: {}", prevKeyPrefix, @@ -403,6 +406,7 @@ private Response noMatchedKeysResponse(String searchPrefix) { /** * Utility method to create a bad request response with a custom message. + * * @param message The message to include in the response body. * @return A Response object configured with the provided message. 
*/ From e64ec04c92422e5f4c9b619c4f35118fd040b3a3 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 11 Mar 2024 01:56:33 +0530 Subject: [PATCH 07/32] Added flags to include fso and non-fso key list --- .../recon/api/OMDBInsightSearchEndpoint.java | 65 +++++++++++-------- .../api/TestOMDBInsightSearchEndpoint.java | 15 +++-- 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 0c7a80af6f16..97b4763e5e50 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -48,6 +48,8 @@ import java.util.*; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.recon.ReconConstants.*; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_NON_FSO; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; @@ -92,9 +94,14 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, @GET @Path("/openKeys/search") public Response searchOpenKeys( - @QueryParam("searchPrefix") String searchPrefix, - @DefaultValue("10") @QueryParam("limit") int limit) - throws IOException { + @QueryParam("searchPrefix") + String searchPrefix, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_FSO) + boolean includeFso, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) + boolean includeNonFso, + @DefaultValue("10") @QueryParam("limit") + int limit) throws IOException { if 
(searchPrefix == null || searchPrefix.trim().isEmpty()) { return createBadRequestResponse( "The searchPrefix query parameter is required."); @@ -105,25 +112,31 @@ public Response searchOpenKeys( long unreplicatedTotal = 0; // Fetch keys from OBS layout and convert them into KeyEntityInfo objects - Map obsKeys = searchOpenKeysInOBS(searchPrefix, limit); - for (Map.Entry entry : obsKeys.entrySet()) { - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getNonFSOKeyInfoList() - .add(keyEntityInfo); // Add to non-FSO list - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); + Map obsKeys = null; + if (includeNonFso) { + obsKeys = searchOpenKeysInOBS(searchPrefix, limit); + for (Map.Entry entry : obsKeys.entrySet()) { + KeyEntityInfo keyEntityInfo = + createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + insightResponse.getNonFSOKeyInfoList() + .add(keyEntityInfo); // Add to non-FSO list + replicatedTotal += entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); + } } // Fetch keys from FSO layout, if the limit is not yet reached - Map fsoKeys = - searchOpenKeysInFSO(searchPrefix, limit - obsKeys.size()); - for (Map.Entry entry : fsoKeys.entrySet()) { - KeyEntityInfo keyEntityInfo = - createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getFsoKeyInfoList().add(keyEntityInfo); // Add to FSO list - replicatedTotal += entry.getValue().getReplicatedSize(); - unreplicatedTotal += entry.getValue().getDataSize(); + if (includeFso) { + Map fsoKeys = + searchOpenKeysInFSO(searchPrefix, limit - obsKeys.size()); + for (Map.Entry entry : fsoKeys.entrySet()) { + KeyEntityInfo keyEntityInfo = + createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); + insightResponse.getFsoKeyInfoList() + .add(keyEntityInfo); // Add to FSO list + replicatedTotal += 
entry.getValue().getReplicatedSize(); + unreplicatedTotal += entry.getValue().getDataSize(); + } } // Set the aggregated totals in the response @@ -330,8 +343,7 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { // Fetch the volumeID String volumeName = names[0]; String volumeKey = omMetadataManager.getVolumeKey(volumeName); - long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey) - .getObjectID(); + long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); if (names.length == 1) { return constructObjectPathWithPrefix(volumeId); } @@ -348,15 +360,12 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { // Fetch the immediate parentID which could be a directory or the bucket itself BucketHandler handler = - getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, - reconSCM, bucketInfo); - OmDirectoryInfo dirInfo = handler.getDirInfo(names); - return constructObjectPathWithPrefix(volumeId, bucketId, - dirInfo.getObjectID()); + getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); + long dirObjectId = handler.getDirInfo(names).getObjectID(); + return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId); } catch (IOException e) { - LOG.error("Error converting key prefix to object path: {}", prevKeyPrefix, - e); + LOG.error("Error converting key prefix to object path: {}", prevKeyPrefix, e); return prevKeyPrefix; // Fallback to original prefix in case of exception } catch (Exception e) { LOG.error("Unexpected error during conversion: {}", prevKeyPrefix, e); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 8eafd3ea8ec0..ce44134c788f 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -127,7 +127,8 @@ public void testSearchOpenKeys() throws Exception { // Search for keys in 'bucketOne' String searchPrefixBucketOne = "/sampleVol/bucketOne/"; Response responseBucketOne = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketOne, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketOne, true, + true, 10); // Assert that the search response for 'bucketOne' is OK and verify the results assertEquals(Response.Status.OK.getStatusCode(), @@ -146,7 +147,8 @@ public void testSearchOpenKeys() throws Exception { // Search for keys in 'bucketTwo' String searchPrefixBucketTwo = "/sampleVol/bucketTwo/"; Response responseBucketTwo = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketTwo, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketTwo, true, + true, 10); // Assert that the search response for 'bucketTwo' is OK and verify the results assertEquals(Response.Status.OK.getStatusCode(), @@ -214,7 +216,8 @@ public void testSearchOpenKeysWithDifferentBucketLayouts() throws Exception { // Search for keys under the LEGACY bucket String searchPrefixLegacy = "/sampleVol/legacyBucket/"; Response responseLegacy = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixLegacy, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixLegacy, true, true, + 10); // Verify response for LEGACY bucket layout assertEquals(Response.Status.OK.getStatusCode(), @@ -230,7 +233,8 @@ public void testSearchOpenKeysWithDifferentBucketLayouts() throws Exception { // Search for keys under the FSO bucket String searchPrefixFso = "/sampleVol/fsoBucket/"; Response responseFso = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixFso, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixFso, true, true, + 10); // Verify 
response for FSO bucket layout assertEquals(Response.Status.OK.getStatusCode(), responseFso.getStatus()); @@ -244,7 +248,8 @@ public void testSearchOpenKeysWithDifferentBucketLayouts() throws Exception { // Pass only the volume name and verify the response. String searchPrefixVolume = "/sampleVol/"; Response responseVolume = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixVolume, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixVolume, true, true, + 10); // Verify response for volume name assertEquals(Response.Status.OK.getStatusCode(), From 4dec79848c4b43e435e21489721cc4a294467dbd Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 13 Mar 2024 02:13:42 +0530 Subject: [PATCH 08/32] Added more UT's --- .../recon/api/OMDBInsightSearchEndpoint.java | 40 +- .../api/TestOMDBInsightSearchEndpoint.java | 582 +++++++++++------- 2 files changed, 388 insertions(+), 234 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 97b4763e5e50..49425a2f18cc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -84,12 +84,14 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, /** - * Search for open keys in the OM DB Insight. - * - * @param searchPrefix The prefix to search for. - * @param limit The maximum number of keys to return. - * @return The response containing the matched keys. - * @throws IOException if an error occurs during the search. + * This method searches for open keys in the OM DB based on the search prefix + * provided. It returns a list of keys that match the search prefix, along with + * the total size of the data and replicated data for the matched keys. 
+ * The search can be performed on FSO layout, non-FSO layout, or both. + * The search is performed in the following order: + * 1. Search for open keys in the OBS layout (non-FSO layout). + * 2. Search for open keys in the FSO layout. + * The search is performed for FSO and non-FSO keys separately, and the results are combined into a single response. */ @GET @Path("/openKeys/search") @@ -112,7 +114,7 @@ public Response searchOpenKeys( long unreplicatedTotal = 0; // Fetch keys from OBS layout and convert them into KeyEntityInfo objects - Map obsKeys = null; + Map obsKeys = new LinkedHashMap<>(); if (includeNonFso) { obsKeys = searchOpenKeysInOBS(searchPrefix, limit); for (Map.Entry entry : obsKeys.entrySet()) { @@ -128,7 +130,7 @@ public Response searchOpenKeys( // Fetch keys from FSO layout, if the limit is not yet reached if (includeFso) { Map fsoKeys = - searchOpenKeysInFSO(searchPrefix, limit - obsKeys.size()); + searchOpenKeysInFSO(searchPrefix, limit); for (Map.Entry entry : fsoKeys.entrySet()) { KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); @@ -139,6 +141,12 @@ public Response searchOpenKeys( } } + // If no keys were found, return a response indicating that no keys matched + if (insightResponse.getNonFSOKeyInfoList().isEmpty() && + insightResponse.getFsoKeyInfoList().isEmpty()) { + return noMatchedKeysResponse(searchPrefix); + } + // Set the aggregated totals in the response insightResponse.setReplicatedDataSize(replicatedTotal); insightResponse.setUnreplicatedDataSize(unreplicatedTotal); @@ -279,7 +287,7 @@ private Map retrieveKeysFromOpenFileTable(String subPath, * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. These sub-directories are then used * to search for open files in the openFileTable. - *

+ * * How it works: * - Starts from a parent directory identified by parentId. * - Looks through all child directories of this parent. @@ -289,8 +297,8 @@ private Map retrieveKeysFromOpenFileTable(String subPath, * * @param parentId The ID of the directory we start exploring from. * @param subPaths A list where we collect paths to all subdirectories. - * @param names An array with at least two elements: the first is volumeID and the second is bucketID. - * These are used to start each path. + * @param names An array with at least two elements: the first is volumeID and + * the second is bucketID. These are used to start each path. * @throws IOException If there are problems accessing directory information. */ private void gatherSubPaths(long parentId, List subPaths, @@ -373,16 +381,6 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { } } - private long getParentId(BucketHandler handler, String[] names, - String bucketName, long bucketId) - throws IOException { - String parentName = names[names.length - 2]; - if (bucketName.equals(parentName) && names.length == 3) { - return bucketId; - } - return handler.getDirObjectId(names, names.length - 1); - } - /** * Constructs an object path with the given IDs. 
* diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index ce44134c788f..c91f9a7539d8 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -18,76 +18,85 @@ package org.apache.hadoop.ozone.recon.api; -import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; -import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.recon.ReconTestInjector; + +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; import org.junit.jupiter.api.BeforeEach; + +import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.junit.platform.commons.logging.Logger; +import org.junit.platform.commons.logging.LoggerFactory; + import javax.ws.rs.core.Response; +import java.io.File; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.HashSet; -import java.util.List; -import java.util.Random; -import java.util.Set; - -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.mock; +import java.util.*; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.mockito.Mockito.when; + /** * Test for 
OMDBInsightSearchEndpoint. */ public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { + @TempDir private Path temporaryFolder; - private OMMetadataManager omMetadataManager; + + Logger LOG = LoggerFactory.getLogger(TestOMDBInsightSearchEndpoint.class); + private ReconOMMetadataManager reconOMMetadataManager; private OMDBInsightSearchEndpoint omdbInsightSearchEndpoint; - private Random random = new Random(); - private Set generatedIds = new HashSet<>(); - public TestOMDBInsightSearchEndpoint() { - super(); - } - - private long generateUniqueRandomLong() { - long newValue; - do { - newValue = random.nextLong(); - } while (generatedIds.contains(newValue)); + private OzoneConfiguration ozoneConfiguration; + private static final String ROOT_PATH = "/"; + private static final String TEST_USER = "TestUser"; + private OMMetadataManager omMetadataManager; - generatedIds.add(newValue); - return newValue; - } + private ReconNamespaceSummaryManager reconNamespaceSummaryManager; @BeforeEach public void setUp() throws Exception { + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD, + 100); omMetadataManager = initializeNewOmMetadataManager( - Files.createDirectory(temporaryFolder.resolve( - "JunitOmMetadata")).toFile()); + Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")) + .toFile()); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, - Files.createDirectory(temporaryFolder.resolve( - "JunitOmMetadataTest")).toFile()); + Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); + ReconTestInjector reconTestInjector = new ReconTestInjector.Builder(temporaryFolder.toFile()) .withReconSqlDb() @@ -101,210 +110,357 @@ public void setUp() throws Exception { .addBinding(OMDBInsightEndpoint.class) 
.addBinding(ContainerHealthSchemaManager.class) .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); omdbInsightSearchEndpoint = reconTestInjector.getInstance( OMDBInsightSearchEndpoint.class); + + // populate OM DB and reprocess into Recon RocksDB + populateOMDB(); + NSSummaryTaskWithFSO nSSummaryTaskWithFso = + new NSSummaryTaskWithFSO(reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); } - @Test - public void testSearchOpenKeys() throws Exception { - // Create 3 keys in 'bucketOne' and 2 keys in 'bucketTwo' - for (int i = 1; i <= 3; i++) { - OmKeyInfo omKeyInfo = - getOmKeyInfo("sampleVol", "bucketOne", "key_" + i, true); - String keyPath = String.format("/sampleVol/%s/key_%d", "bucketOne", i); - reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) - .put(keyPath, omKeyInfo); - } - - for (int i = 1; i <= 2; i++) { - OmKeyInfo omKeyInfo = - getOmKeyInfo("sampleVol", "bucketTwo", "key_" + i, true); - String keyPath = String.format("/sampleVol/%s/key_%d", "bucketTwo", i); - reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) - .put(keyPath, omKeyInfo); - } - - // Search for keys in 'bucketOne' - String searchPrefixBucketOne = "/sampleVol/bucketOne/"; - Response responseBucketOne = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketOne, true, - true, 10); - - // Assert that the search response for 'bucketOne' is OK and verify the results - assertEquals(Response.Status.OK.getStatusCode(), - responseBucketOne.getStatus()); - List searchResultsBucketOne = - (List) responseBucketOne.getEntity(); - assertNotNull(searchResultsBucketOne); - assertEquals(3, searchResultsBucketOne.size()); - - searchResultsBucketOne.forEach(keyInfo -> { - assertTrue(keyInfo.getKeyName().startsWith("key_")); - assertTrue(keyInfo.getVolumeName().startsWith("sampleVol")); - 
assertTrue(keyInfo.getBucketName().startsWith("bucketOne")); - }); - - // Search for keys in 'bucketTwo' - String searchPrefixBucketTwo = "/sampleVol/bucketTwo/"; - Response responseBucketTwo = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixBucketTwo, true, - true, 10); - - // Assert that the search response for 'bucketTwo' is OK and verify the results - assertEquals(Response.Status.OK.getStatusCode(), - responseBucketTwo.getStatus()); - List searchResultsBucketTwo = - (List) responseBucketTwo.getEntity(); - assertNotNull(searchResultsBucketTwo); - assertEquals(2, searchResultsBucketTwo.size()); - - searchResultsBucketTwo.forEach(keyInfo -> { - assertTrue(keyInfo.getKeyName().startsWith("key_")); - assertTrue(keyInfo.getVolumeName().startsWith("sampleVol")); - assertTrue(keyInfo.getBucketName().startsWith("bucketTwo")); - }); + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. + * + * @throws IOException ioEx + */ + private static OMMetadataManager initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + OzoneConfiguration omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + return omMetadataManager; } @Test - public void testConvertToObjectPathForEmptyPrefix() throws Exception { - String result = omdbInsightSearchEndpoint.convertToObjectPath(""); - assertEquals("", result, "Expected an empty string for empty input"); + public void testRootLevelSearch() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, true, true, 20); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(10, result.getFsoKeyInfoList().size()); + assertEquals(5, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(15000, 
result.getUnreplicatedDataSize()); + assertEquals(15000 * 3, result.getReplicatedDataSize()); + + // Switch off the include Fso flag + response = + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, false, true, 20); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(5, result.getNonFSOKeyInfoList().size()); + + // Switch off the include Non Fso flag + response = + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, true, false, 20); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(10, result.getFsoKeyInfoList().size()); } @Test - public void testConvertToObjectPathForVolumeOnly() throws Exception { - setupVolume("vol1", 100L); - - String result = omdbInsightSearchEndpoint.convertToObjectPath("/vol1"); - assertEquals("/100", result, "Incorrect conversion for volume only path"); + public void testBucketLevelSearch() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1", true, true, 20); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(5000, result.getUnreplicatedDataSize()); + assertEquals(5000 * 3, result.getReplicatedDataSize()); + + response = + omdbInsightSearchEndpoint.searchOpenKeys("/volB/bucketB1", true, true, 20); + assertEquals(200, response.getStatus()); + result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getNonFSOKeyInfoList().size()); + assertEquals(0, result.getFsoKeyInfoList().size()); + // Assert Total Size + assertEquals(5000, result.getUnreplicatedDataSize()); + assertEquals(5000 * 3, result.getReplicatedDataSize()); } @Test - public void testConvertToObjectPathForVolumeAndBucket() throws Exception { - setupVolume("vol1", 100L); - 
setupBucket("vol1", "bucketOne", 200L, BucketLayout.LEGACY); - - String result = - omdbInsightSearchEndpoint.convertToObjectPath("vol1/bucketOne"); - assertEquals("/100/200", result, - "Incorrect conversion for volume and bucket path"); + public void testDirectoryLevelSearch() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA1", true, true, 20); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); + + response = + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA2", true, true, 20); + assertEquals(200, response.getStatus()); + result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); + + response = + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA3", true, true, 20); + assertEquals(200, response.getStatus()); + result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); } @Test - public void testSearchOpenKeysWithDifferentBucketLayouts() throws Exception { - // Setup for LEGACY bucket layout - OmKeyInfo legacyKeyInfo = - getOmKeyInfo("sampleVol", "legacyBucket", "legacy_key_1", true); - String legacyKeyPath = - String.format("/sampleVol/%s/%s", "legacyBucket", "legacy_key_1"); - 
reconOMMetadataManager.getOpenKeyTable(BucketLayout.LEGACY) - .put(legacyKeyPath, legacyKeyInfo); - - // Setup for FSO bucket layout - OmKeyInfo fsoKeyInfo = - getOmKeyInfo("sampleVol", "fsoBucket", "fso_key_1", true); - setupBucket("sampleVol", "fsoBucket", 200L, - BucketLayout.FILE_SYSTEM_OPTIMIZED); - long fsoKeyObjectId = fsoKeyInfo.getObjectID(); - long bucketId = 200L; - long volumeId = 0L; - String fsoKeyPath = - String.format("/%s/%s/%s", volumeId, bucketId, fsoKeyObjectId); - reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) - .put(fsoKeyPath, fsoKeyInfo); - - // Search for keys under the LEGACY bucket - String searchPrefixLegacy = "/sampleVol/legacyBucket/"; - Response responseLegacy = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixLegacy, true, true, - 10); - - // Verify response for LEGACY bucket layout - assertEquals(Response.Status.OK.getStatusCode(), - responseLegacy.getStatus()); - List searchResultsLegacy = - (List) responseLegacy.getEntity(); - assertNotNull(searchResultsLegacy); - assertEquals(1, searchResultsLegacy.size()); - assertTrue( - searchResultsLegacy.get(0).getKeyName().startsWith("legacy_key_")); - assertEquals("legacyBucket", searchResultsLegacy.get(0).getBucketName()); - - // Search for keys under the FSO bucket - String searchPrefixFso = "/sampleVol/fsoBucket/"; - Response responseFso = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixFso, true, true, - 10); - - // Verify response for FSO bucket layout - assertEquals(Response.Status.OK.getStatusCode(), responseFso.getStatus()); - List searchResultsFso = - (List) responseFso.getEntity(); - assertNotNull(searchResultsFso); - assertEquals(1, searchResultsFso.size()); - assertTrue(searchResultsFso.get(0).getKeyName().startsWith("fso_key_")); - assertEquals("fsoBucket", searchResultsFso.get(0).getBucketName()); - - // Pass only the volume name and verify the response. 
- String searchPrefixVolume = "/sampleVol/"; - Response responseVolume = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefixVolume, true, true, - 10); - - // Verify response for volume name - assertEquals(Response.Status.OK.getStatusCode(), - responseVolume.getStatus()); - List searchResultsVolume = - (List) responseVolume.getEntity(); - assertNotNull(searchResultsVolume); - assertEquals(2, searchResultsVolume.size()); + public void testLimitSearch() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, true, true, 5); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(5, result.getFsoKeyInfoList().size()); + assertEquals(5, result.getNonFSOKeyInfoList().size()); } - private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, - String keyName, boolean isFile) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFile(isFile) - .setObjectID(generateUniqueRandomLong()) - .setReplicationConfig(StandaloneReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.ONE)) - .setDataSize(random.nextLong()) - .build(); - } - public void setupVolume(String volumeName, long volumeId) throws Exception { - Table volumeTable = - reconOMMetadataManager.getVolumeTable(); + /** + * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. + * The test setup mimics the following filesystem structure with specified sizes: + *

+ * root (Total Size: 15000KB) + * ├── volA (Total Size: 10000KB) + * │ ├── bucketA1 (FSO) Total Size: 5000KB + * │ │ ├── fileA1 (Size: 1000KB) + * │ │ ├── fileA2 (Size: 1000KB) + * │ │ ├── dirA1 (Total Size: 1000KB) + * │ │ ├── dirA2 (Total Size: 1000KB) + * │ │ └── dirA3 (Total Size: 1000KB) + * │ ├── bucketA2 (FSO) Total Size: 5000KB + * │ │ ├── fileA3 (Size: 1000KB) + * │ │ ├── fileA4 (Size: 1000KB) + * │ │ ├── dirA4 (Total Size: 1000KB) + * │ │ ├── dirA5 (Total Size: 1000KB) + * │ │ └── dirA6 (Total Size: 1000KB) + * └── volB (Total Size: 5000KB) + * └── bucketB1 (OBS) Total Size: 5000KB + * ├── fileB1 (Size: 1000KB) + * ├── fileB2 (Size: 1000KB) + * ├── fileB3 (Size: 1000KB) + * ├── fileB4 (Size: 1000KB) + * └── fileB5 (Size: 1000KB) + * + * @throws Exception + */ + private void populateOMDB() throws Exception { + // Create Volumes + long volAObjectId = createVolume("volA"); + long volBObjectId = createVolume("volB"); + + // Create Buckets in volA + long bucketA1ObjectId = + createBucket("volA", "bucketA1", 1000 + 1000 + 1000 + 1000 + 1000, + getFSOBucketLayout()); + long bucketA2ObjectId = + createBucket("volA", "bucketA2", 1000 + 1000 + 1000 + 1000 + 1000, + getFSOBucketLayout()); + + // Create Bucket in volB + long bucketB1ObjectId = + createBucket("volB", "bucketB1", 1000 + 1000 + 1000 + 1000 + 1000, + getOBSBucketLayout()); + + // Create Directories and Files under bucketA1 + long dirA1ObjectId = + createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, + "dirA1"); + long dirA2ObjectId = + createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, + "dirA2"); + long dirA3ObjectId = + createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, + "dirA3"); + + // Files directly under bucketA1 + createOpenFile("fileA1", "bucketA1", "volA", "fileA1", bucketA1ObjectId, + bucketA1ObjectId, volAObjectId, 1000); + createOpenFile("fileA2", "bucketA1", "volA", "fileA2", bucketA1ObjectId, + bucketA1ObjectId, volAObjectId, 1000); + + // Create 
Directories and Files under bucketA2 + long dirA4ObjectId = + createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, + "dirA4"); + long dirA5ObjectId = + createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, + "dirA5"); + long dirA6ObjectId = + createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, + "dirA6"); + + // Files directly under bucketA2 + createOpenFile("fileA3", "bucketA2", "volA", "fileA3", bucketA2ObjectId, + bucketA2ObjectId, volAObjectId, 1000); + createOpenFile("fileA4", "bucketA2", "volA", "fileA4", bucketA2ObjectId, + bucketA2ObjectId, volAObjectId, 1000); + + // Files directly under bucketB1 + createOpenKey("fileB1", "bucketB1", "volB", "fileB1", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 1000); + createOpenKey("fileB2", "bucketB1", "volB", "fileB2", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 1000); + createOpenKey("fileB3", "bucketB1", "volB", "fileB3", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 1000); + createOpenKey("fileB4", "bucketB1", "volB", "fileB4", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 1000); + createOpenKey("fileB5", "bucketB1", "volB", "fileB5", bucketB1ObjectId, + bucketB1ObjectId, volBObjectId, 1000); + + // Create Inner files under directories + createOpenFile("dirA1/innerFile", "bucketA1", "volA", "innerFile", + dirA1ObjectId, bucketA1ObjectId, volAObjectId, 1000); + createOpenFile("dirA2/innerFile", "bucketA1", "volA", "innerFile", + dirA2ObjectId, bucketA1ObjectId, volAObjectId, 1000); + createOpenFile("dirA3/innerFile", "bucketA1", "volA", "innerFile", + dirA3ObjectId, bucketA1ObjectId, volAObjectId, 1000); + createOpenFile("dirA4/innerFile", "bucketA2", "volA", "innerFile", + dirA4ObjectId, bucketA2ObjectId, volAObjectId, 1000); + createOpenFile("dirA5/innerFile", "bucketA2", "volA", "innerFile", + dirA5ObjectId, bucketA2ObjectId, volAObjectId, 1000); + createOpenFile("dirA6/innerFile", "bucketA2", "volA", "innerFile", + dirA6ObjectId, bucketA2ObjectId, 
volAObjectId, 1000); + } - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setVolume(volumeName) - .setAdminName("TestUser") - .setOwnerName("TestUser") + /** + * Create a volume and add it to the Volume Table. + * + * @return volume Object ID + * @throws IOException + */ + private long createVolume(String volumeName) throws Exception { + String volumeKey = reconOMMetadataManager.getVolumeKey(volumeName); + long volumeId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Generate positive ID + OmVolumeArgs args = OmVolumeArgs.newBuilder() .setObjectID(volumeId) + .setVolume(volumeName) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) .build(); - // Insert the volume into the table - volumeTable.put(reconOMMetadataManager.getVolumeKey(volumeName), - omVolumeArgs); + + reconOMMetadataManager.getVolumeTable().put(volumeKey, args); + return volumeId; } - private void setupBucket(String volumeName, String bucketName, long bucketId, - BucketLayout layout) + /** + * Create a bucket and add it to the Bucket Table. 
+ * + * @return bucket Object ID + * @throws IOException + */ + private long createBucket(String volumeName, String bucketName, long dataSize, + BucketLayout bucketLayout) throws Exception { - Table bucketTable = - reconOMMetadataManager.getBucketTable(); - - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() + String bucketKey = + reconOMMetadataManager.getBucketKey(volumeName, bucketName); + long bucketId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Generate positive ID + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) .setObjectID(bucketId) - .setBucketLayout(layout) + .setBucketLayout(bucketLayout) + .setUsedBytes(dataSize) .build(); - String bucketKey = - reconOMMetadataManager.getBucketKey(volumeName, bucketName); - bucketTable.put(bucketKey, omBucketInfo); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo); + return bucketId; + } + + /** + * Create a directory and add it to the Directory Table. + * + * @return directory Object ID + * @throws IOException + */ + private long createDirectory(long parentObjectId, + long bucketObjectId, + long volumeObjectId, + String dirName) throws IOException { + long objectId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Ensure positive ID + writeDirToOm(reconOMMetadataManager, objectId, parentObjectId, + bucketObjectId, + volumeObjectId, dirName); + return objectId; + } + + /** + * Create a file and add it to the Open File Table. 
+ * + * @return file Object ID + * @throws IOException + */ + @SuppressWarnings("checkstyle:ParameterNumber") + private long createOpenFile(String key, + String bucket, + String volume, + String fileName, + long parentObjectId, + long bucketObjectId, + long volumeObjectId, + long dataSize) throws IOException { + long objectId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Ensure positive ID + writeOpenFileToOm(reconOMMetadataManager, key, bucket, volume, fileName, + objectId, parentObjectId, bucketObjectId, volumeObjectId, null, + dataSize); + return objectId; + } + + /** + * Create a key and add it to the Open Key Table. + * + * @return key Object ID + * @throws IOException + */ + private long createOpenKey(String key, + String bucket, + String volume, + String fileName, + long parentObjectId, + long bucketObjectId, + long volumeObjectId, + long dataSize) throws IOException { + long objectId = UUID.randomUUID().getMostSignificantBits() & + Long.MAX_VALUE; // Ensure positive ID + writeOpenKeyToOm(reconOMMetadataManager, key, bucket, volume, null, + dataSize); + return objectId; + } + + private static BucketLayout getFSOBucketLayout() { + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + } + + private static BucketLayout getOBSBucketLayout() { + return BucketLayout.OBJECT_STORE; } } From 2be413615ef24839665039aaf66e9bde8705b2bb Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 13 Mar 2024 13:02:55 +0530 Subject: [PATCH 09/32] Fixed the left out review comments --- .../recon/api/OMDBInsightSearchEndpoint.java | 61 +++++++++++-------- .../api/TestOMDBInsightSearchEndpoint.java | 20 +++++- 2 files changed, 55 insertions(+), 26 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 49425a2f18cc..3535cd603bed 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -30,9 +30,7 @@ import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; -import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; -import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,10 +43,16 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; -import java.util.*; +import java.util.Map; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.*; +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO; +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_NON_FSO; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_FSO; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_NON_FSO; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; @@ -64,7 +68,6 @@ public class OMDBInsightSearchEndpoint { private OzoneStorageContainerManager reconSCM; private final ReconOMMetadataManager omMetadataManager; - private final ReconContainerManager containerManager; private static final Logger LOG = LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class); private ReconNamespaceSummaryManagerImpl 
reconNamespaceSummaryManager; @@ -73,25 +76,33 @@ public class OMDBInsightSearchEndpoint { @Inject public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, ReconOMMetadataManager omMetadataManager, - ReconNamespaceSummaryManagerImpl - reconNamespaceSummaryManager) { + ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager) { this.reconSCM = reconSCM; - this.containerManager = - (ReconContainerManager) reconSCM.getContainerManager(); this.omMetadataManager = omMetadataManager; this.reconNamespaceSummaryManager = reconNamespaceSummaryManager; } /** - * This method searches for open keys in the OM DB based on the search prefix - * provided. It returns a list of keys that match the search prefix, along with - * the total size of the data and replicated data for the matched keys. - * The search can be performed on FSO layout, non-FSO layout, or both. - * The search is performed in the following order: - * 1. Search for open keys in the OBS layout (non-FSO layout). - * 2. Search for open keys in the FSO layout. - * The search is performed for fso and non-fso keys separately and the results. + * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix. + * This endpoint can search across both File System Optimized (FSO) and Object Store (non-FSO) layouts, + * compiling a list of keys that match the given prefix along with their data sizes. + * + * The search prefix may range from the root level ('/') to any specific directory + * or key level (e.g., '/volA/' for everything under 'volA'). The search operation matches + * the prefix against the start of keys' names within the OM DB. + * + * Example Usage: + * 1. A searchPrefix of "/" will return all keys in the database. + * 2. A searchPrefix of "/volA/" retrieves every key under volume 'volA'. + * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. 
+ * + * @param searchPrefix The prefix for searching keys, starting from the root ('/') or any specific path. + * @param includeFso Indicates whether to include FSO layout keys in the search. + * @param includeNonFso Indicates whether to include non-FSO layout keys in the search. + * @param limit Limits the number of returned keys. + * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. + * @throws IOException On failure to access the OM database or process the operation. */ @GET @Path("/openKeys/search") @@ -112,12 +123,14 @@ public Response searchOpenKeys( KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); long replicatedTotal = 0; long unreplicatedTotal = 0; + boolean keysFound = false; // Flag to track if any keys are found // Fetch keys from OBS layout and convert them into KeyEntityInfo objects Map obsKeys = new LinkedHashMap<>(); if (includeNonFso) { obsKeys = searchOpenKeysInOBS(searchPrefix, limit); for (Map.Entry entry : obsKeys.entrySet()) { + keysFound = true; KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); insightResponse.getNonFSOKeyInfoList() @@ -129,9 +142,9 @@ public Response searchOpenKeys( // Fetch keys from FSO layout, if the limit is not yet reached if (includeFso) { - Map fsoKeys = - searchOpenKeysInFSO(searchPrefix, limit); + Map fsoKeys = searchOpenKeysInFSO(searchPrefix, limit); for (Map.Entry entry : fsoKeys.entrySet()) { + keysFound = true; KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); insightResponse.getFsoKeyInfoList() @@ -142,8 +155,7 @@ public Response searchOpenKeys( } // If no keys were found, return a response indicating that no keys matched - if (insightResponse.getNonFSOKeyInfoList().isEmpty() && - insightResponse.getFsoKeyInfoList().isEmpty()) { + if (!keysFound) { return noMatchedKeysResponse(searchPrefix); } @@ -340,7 +352,7 @@ private void gatherSubPaths(long parentId, List 
subPaths, * @return The object path as "/volumeID/bucketID/ParentId/". * @throws IOException If database access fails. */ - public String convertToObjectPath(String prevKeyPrefix) throws IOException { + public String convertToObjectPath(String prevKeyPrefix) { if (prevKeyPrefix.isEmpty()) { return ""; } @@ -402,9 +414,8 @@ private String constructObjectPathWithPrefix(long... ids) { * @return The response indicating that no keys matched the search prefix. */ private Response noMatchedKeysResponse(String searchPrefix) { - String message = - "{\"message\": \"No keys exist for the specified search prefix"; - message += ".\"}"; + String message = String.format( + "No keys matched the search prefix: '%s'.", searchPrefix); return Response.status(Response.Status.NOT_FOUND) .entity(message) .type(MediaType.APPLICATION_JSON) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index c91f9a7539d8..d8ebc380ac0e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; +import org.glassfish.jersey.internal.Errors; import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; @@ -239,11 +240,28 @@ public void testLimitSearch() throws IOException { assertEquals(5, result.getNonFSOKeyInfoList().size()); } + @Test + public void testSearchOpenKeysWithNoMatchFound() throws IOException { + // Given a search prefix that matches no keys + String searchPrefix = 
"nonexistentKeyPrefix"; + + Response response = + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, true, true, 10); + + // Then the response should indicate that no keys were found + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), + response.getStatus(), "Expected a 404 NOT FOUND status"); + + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys exist for the specified search prefix"), + "Expected a message indicating no keys were found"); + } + /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. * The test setup mimics the following filesystem structure with specified sizes: - *

+ * * root (Total Size: 15000KB) * ├── volA (Total Size: 10000KB) * │ ├── bucketA1 (FSO) Total Size: 5000KB From bf8e661d82b6e117f0a632b09b98a14c725a552d Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 13 Mar 2024 13:22:18 +0530 Subject: [PATCH 10/32] Added more changes and newer tests --- .../recon/api/OMDBInsightSearchEndpoint.java | 26 ++++++++++++++++--- .../api/TestOMDBInsightSearchEndpoint.java | 17 ++++++++++++ 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 3535cd603bed..49f05042e5bd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -210,8 +210,8 @@ public Map searchOpenKeysInOBS(String searchPrefix, matchedKeys.put(dbKey, entry.getValue()); } } catch (NullPointerException | IOException exception) { - LOG.error("Error retrieving keys from openKeyTable for path: {}", - searchPrefix, exception); + createInternalServerErrorResponse( + "Error retrieving keys from openKeyTable for path: " + searchPrefix); } return matchedKeys; @@ -288,8 +288,8 @@ private Map retrieveKeysFromOpenFileTable(String subPath, matchedKeys.put(dbKey, entry.getValue()); } } catch (NullPointerException | IOException exception) { - LOG.error("Error retrieving keys from openFileTable for path: {}", - subPath, exception); + createInternalServerErrorResponse( + "Error retrieving keys from openFileTable for path: " + subPath); } return matchedKeys; } @@ -424,6 +424,8 @@ private Response noMatchedKeysResponse(String searchPrefix) { /** * Utility method to create a bad request response with a custom message. 
+ * Which means the request sent by the client to the server is incorrect + * or malformed and cannot be processed by the server. * * @param message The message to include in the response body. * @return A Response object configured with the provided message. @@ -436,4 +438,20 @@ private Response createBadRequestResponse(String message) { .build(); } + /** + * Utility method to create an internal server error response with a custom message. + * Which means the server encountered an unexpected condition that prevented it + * from fulfilling the request. + * + * @param message The message to include in the response body. + * @return A Response object configured with the provided message. + */ + private Response createInternalServerErrorResponse(String message) { + String jsonResponse = String.format("{\"message\": \"%s\"}", message); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity(jsonResponse) + .type(MediaType.APPLICATION_JSON) + .build(); + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index d8ebc380ac0e..fda45e096d39 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -257,6 +257,23 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { "Expected a message indicating no keys were found"); } + @Test + public void testSearchOpenKeysWithBadRequest() throws IOException { + // Given a search prefix that is empty + String searchPrefix = ""; + + Response response = + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, true, true, 10); + + // Then the response should indicate that the request was bad + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), + 
response.getStatus(), "Expected a 400 BAD REQUEST status"); + + String entity = (String) response.getEntity(); + assertTrue(entity.contains("The searchPrefix query parameter is required."), + "Expected a message indicating the search prefix cannot be empty"); + } + /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. From 12a540dcd9f906dc83b93da2efeb206fc73d619f Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 13 Mar 2024 15:57:29 +0530 Subject: [PATCH 11/32] Minor changes --- .../api/TestOMDBInsightSearchEndpoint.java | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index fda45e096d39..cebce2be18cd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -253,7 +253,7 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { response.getStatus(), "Expected a 404 NOT FOUND status"); String entity = (String) response.getEntity(); - assertTrue(entity.contains("No keys exist for the specified search prefix"), + assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); } @@ -356,16 +356,11 @@ private void populateOMDB() throws Exception { bucketA2ObjectId, volAObjectId, 1000); // Files directly under bucketB1 - createOpenKey("fileB1", "bucketB1", "volB", "fileB1", bucketB1ObjectId, - bucketB1ObjectId, volBObjectId, 1000); - createOpenKey("fileB2", "bucketB1", "volB", "fileB2", bucketB1ObjectId, - bucketB1ObjectId, volBObjectId, 1000); - createOpenKey("fileB3", "bucketB1", "volB", "fileB3", bucketB1ObjectId, - bucketB1ObjectId, volBObjectId, 1000); - 
createOpenKey("fileB4", "bucketB1", "volB", "fileB4", bucketB1ObjectId, - bucketB1ObjectId, volBObjectId, 1000); - createOpenKey("fileB5", "bucketB1", "volB", "fileB5", bucketB1ObjectId, - bucketB1ObjectId, volBObjectId, 1000); + createOpenKey("fileB1", "bucketB1", "volB", 1000); + createOpenKey("fileB2", "bucketB1", "volB", 1000); + createOpenKey("fileB3", "bucketB1", "volB", 1000); + createOpenKey("fileB4", "bucketB1", "volB", 1000); + createOpenKey("fileB5", "bucketB1", "volB", 1000); // Create Inner files under directories createOpenFile("dirA1/innerFile", "bucketA1", "volA", "innerFile", @@ -478,10 +473,6 @@ private long createOpenFile(String key, private long createOpenKey(String key, String bucket, String volume, - String fileName, - long parentObjectId, - long bucketObjectId, - long volumeObjectId, long dataSize) throws IOException { long objectId = UUID.randomUUID().getMostSignificantBits() & Long.MAX_VALUE; // Ensure positive ID From f692c664a5e7323a72b33e5079df2da35dca49d9 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 17 Mar 2024 17:35:45 +0530 Subject: [PATCH 12/32] Made review changes --- .../hadoop/ozone/recon/ReconConstants.java | 2 + .../recon/api/OMDBInsightSearchEndpoint.java | 77 +++++++++---------- .../api/TestOMDBInsightSearchEndpoint.java | 2 +- 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 134092146e54..c084e81a4d24 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -38,12 +38,14 @@ private ReconConstants() { // By default, limit the number of results returned public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false"; public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; + public static final 
String DEFAULT_SEARCH_PREFIX = "/"; public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_BATCH_NUMBER = "1"; public static final String RECON_QUERY_BATCH_PARAM = "batchNum"; public static final String RECON_QUERY_PREVKEY = "prevKey"; public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; + public static final String RECON_OPEN_KEY_SEARCH_LIMIT = "10"; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 49f05042e5bd..61739f14b9ed 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -23,7 +23,10 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; @@ -50,10 +53,7 @@ import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_FSO; -import static 
org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_OPEN_KEY_INCLUDE_NON_FSO; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_FSO; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_INCLUDE_NON_FSO; +import static org.apache.hadoop.ozone.recon.ReconConstants.*; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; @@ -93,31 +93,30 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * the prefix against the start of keys' names within the OM DB. * * Example Usage: - * 1. A searchPrefix of "/" will return all keys in the database. - * 2. A searchPrefix of "/volA/" retrieves every key under volume 'volA'. - * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. + * 1. A startPrefix of "/" will return all keys in the database. + * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'. + * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. * - * @param searchPrefix The prefix for searching keys, starting from the root ('/') or any specific path. - * @param includeFso Indicates whether to include FSO layout keys in the search. + * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path. + * @param includeFso Indicates whether to include FSO layout keys in the search. * @param includeNonFso Indicates whether to include non-FSO layout keys in the search. - * @param limit Limits the number of returned keys. + * @param limit Limits the number of returned keys. * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. * @throws IOException On failure to access the OM database or process the operation. 
*/ @GET @Path("/openKeys/search") public Response searchOpenKeys( - @QueryParam("searchPrefix") - String searchPrefix, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_FSO) + @DefaultValue(DEFAULT_SEARCH_PREFIX) @QueryParam("startPrefix") + String startPrefix, + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) @QueryParam("includeFso") boolean includeFso, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam(RECON_OPEN_KEY_INCLUDE_NON_FSO) + @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam("includeNonFso") boolean includeNonFso, - @DefaultValue("10") @QueryParam("limit") + @DefaultValue(RECON_OPEN_KEY_SEARCH_LIMIT) @QueryParam("limit") int limit) throws IOException { - if (searchPrefix == null || searchPrefix.trim().isEmpty()) { - return createBadRequestResponse( - "The searchPrefix query parameter is required."); + if (limit < 0) { + return createBadRequestResponse("Limit cannot be negative."); } KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); @@ -128,7 +127,7 @@ public Response searchOpenKeys( // Fetch keys from OBS layout and convert them into KeyEntityInfo objects Map obsKeys = new LinkedHashMap<>(); if (includeNonFso) { - obsKeys = searchOpenKeysInOBS(searchPrefix, limit); + obsKeys = searchOpenKeysInOBS(startPrefix, limit); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -142,7 +141,7 @@ public Response searchOpenKeys( // Fetch keys from FSO layout, if the limit is not yet reached if (includeFso) { - Map fsoKeys = searchOpenKeysInFSO(searchPrefix, limit); + Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit); for (Map.Entry entry : fsoKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -156,7 +155,7 @@ public Response searchOpenKeys( // If no keys were found, return a response indicating that no keys matched if (!keysFound) { - return noMatchedKeysResponse(searchPrefix); + return noMatchedKeysResponse(startPrefix); } // Set 
the aggregated totals in the response @@ -187,8 +186,7 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, } - public Map searchOpenKeysInOBS(String searchPrefix, - int limit) + public Map searchOpenKeysInOBS(String startPrefix, int limit) throws IOException { Map matchedKeys = @@ -199,32 +197,32 @@ public Map searchOpenKeysInOBS(String searchPrefix, try ( TableIterator> keyIter = openKeyTable.iterator()) { - keyIter.seek(searchPrefix); + keyIter.seek(startPrefix); while (keyIter.hasNext() && matchedKeys.size() < limit) { Table.KeyValue entry = keyIter.next(); String dbKey = entry.getKey(); // Get the DB key - if (!dbKey.startsWith(searchPrefix)) { + if (!dbKey.startsWith(startPrefix)) { break; // Exit the loop if the key no longer matches the prefix } // Add the DB key and OmKeyInfo object to the map matchedKeys.put(dbKey, entry.getValue()); } - } catch (NullPointerException | IOException exception) { + } catch (IOException exception) { createInternalServerErrorResponse( - "Error retrieving keys from openKeyTable for path: " + searchPrefix); + "Error retrieving keys from openKeyTable for path: " + startPrefix); } return matchedKeys; } - public Map searchOpenKeysInFSO(String searchPrefix, + public Map searchOpenKeysInFSO(String startPrefix, int limit) throws IOException { Map matchedKeys = new LinkedHashMap<>(); // Convert the search prefix to an object path for FSO buckets - String searchPrefixObjectPath = convertToObjectPath(searchPrefix); - String[] names = parseRequestPath(searchPrefixObjectPath); + String startPrefixObjectPath = convertToObjectPath(startPrefix); + String[] names = parseRequestPath(startPrefixObjectPath); // If names.length > 2, then the search prefix is at the volume or bucket level hence // no need to find parent or extract id's or find subpaths as the openFileTable is @@ -242,7 +240,7 @@ public Map searchOpenKeysInFSO(String searchPrefix, List subPaths = new ArrayList<>(); // Add the initial search prefix object path 
because it can have both openFiles // and sub-directories with openFiles - subPaths.add(searchPrefixObjectPath); + subPaths.add(startPrefixObjectPath); // Recursively gather all subpaths gatherSubPaths(parentId, subPaths, names); @@ -259,7 +257,7 @@ public Map searchOpenKeysInFSO(String searchPrefix, } // Iterate over for bucket and volume level search - matchedKeys.putAll(retrieveKeysFromOpenFileTable(searchPrefixObjectPath, + matchedKeys.putAll(retrieveKeysFromOpenFileTable(startPrefixObjectPath, limit - matchedKeys.size())); return matchedKeys; } @@ -287,7 +285,7 @@ private Map retrieveKeysFromOpenFileTable(String subPath, // Add the DB key and OmKeyInfo object to the map matchedKeys.put(dbKey, entry.getValue()); } - } catch (NullPointerException | IOException exception) { + } catch (IOException exception) { createInternalServerErrorResponse( "Error retrieving keys from openFileTable for path: " + subPath); } @@ -410,14 +408,15 @@ private String constructObjectPathWithPrefix(long... ids) { /** * Returns a response indicating that no keys matched the search prefix. * - * @param searchPrefix The search prefix that was used. + * @param startPrefix The search prefix that was used. * @return The response indicating that no keys matched the search prefix. 
*/ - private Response noMatchedKeysResponse(String searchPrefix) { - String message = String.format( - "No keys matched the search prefix: '%s'.", searchPrefix); + private Response noMatchedKeysResponse(String startPrefix) { + String jsonResponse = String.format( + "{\"message\": \"No keys matched the search prefix: '%s'.\"}", + startPrefix); return Response.status(Response.Status.NOT_FOUND) - .entity(message) + .entity(jsonResponse) .type(MediaType.APPLICATION_JSON) .build(); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index cebce2be18cd..66dd44301a10 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -270,7 +270,7 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { response.getStatus(), "Expected a 400 BAD REQUEST status"); String entity = (String) response.getEntity(); - assertTrue(entity.contains("The searchPrefix query parameter is required."), + assertTrue(entity.contains("The startPrefix query parameter is required."), "Expected a message indicating the search prefix cannot be empty"); } From 61f37253d1120eed3a3c69f33011c844dc6c4080 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 17 Mar 2024 19:38:20 +0530 Subject: [PATCH 13/32] Did a bit of code refactoring --- .../recon/api/OMDBInsightSearchEndpoint.java | 143 +++++++----------- .../api/TestOMDBInsightSearchEndpoint.java | 12 +- 2 files changed, 63 insertions(+), 92 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 61739f14b9ed..2e6acf28eb5c 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -127,7 +127,8 @@ public Response searchOpenKeys( // Fetch keys from OBS layout and convert them into KeyEntityInfo objects Map obsKeys = new LinkedHashMap<>(); if (includeNonFso) { - obsKeys = searchOpenKeysInOBS(startPrefix, limit); + Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); + obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -165,57 +166,6 @@ public Response searchOpenKeys( return Response.ok(insightResponse).build(); } - /** - * Creates a KeyEntityInfo object from an OmKeyInfo object and the corresponding key. - * - * @param dbKey The key in the database corresponding to the OmKeyInfo object. - * @param keyInfo The OmKeyInfo object to create the KeyEntityInfo from. - * @return The KeyEntityInfo object created from the OmKeyInfo object and the key. 
- */ - private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, - OmKeyInfo keyInfo) { - KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(dbKey); // Set the DB key - keyEntityInfo.setPath( - keyInfo.getKeyName()); // Assuming path is the same as key name - keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); - keyEntityInfo.setSize(keyInfo.getDataSize()); - keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); - keyEntityInfo.setReplicationConfig(keyInfo.getReplicationConfig()); - return keyEntityInfo; - } - - - public Map searchOpenKeysInOBS(String startPrefix, int limit) - throws IOException { - - Map matchedKeys = - new LinkedHashMap<>(); // Preserves the insertion order - Table openKeyTable = - omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); - - try ( - TableIterator> - keyIter = openKeyTable.iterator()) { - keyIter.seek(startPrefix); - while (keyIter.hasNext() && matchedKeys.size() < limit) { - Table.KeyValue entry = keyIter.next(); - String dbKey = entry.getKey(); // Get the DB key - if (!dbKey.startsWith(startPrefix)) { - break; // Exit the loop if the key no longer matches the prefix - } - // Add the DB key and OmKeyInfo object to the map - matchedKeys.put(dbKey, entry.getValue()); - } - } catch (IOException exception) { - createInternalServerErrorResponse( - "Error retrieving keys from openKeyTable for path: " + startPrefix); - } - - return matchedKeys; - } - - public Map searchOpenKeysInFSO(String startPrefix, int limit) throws IOException { @@ -223,6 +173,7 @@ public Map searchOpenKeysInFSO(String startPrefix, // Convert the search prefix to an object path for FSO buckets String startPrefixObjectPath = convertToObjectPath(startPrefix); String[] names = parseRequestPath(startPrefixObjectPath); + Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); // If names.length > 2, then the search prefix is at the volume or bucket level hence // no need to find parent 
or extract id's or find subpaths as the openFileTable is @@ -248,7 +199,7 @@ public Map searchOpenKeysInFSO(String startPrefix, // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { matchedKeys.putAll( - retrieveKeysFromOpenFileTable(subPath, limit - matchedKeys.size())); + retrieveKeysFromTable(openFileTable, subPath, limit - matchedKeys.size())); if (matchedKeys.size() >= limit) { break; } @@ -257,42 +208,10 @@ public Map searchOpenKeysInFSO(String startPrefix, } // Iterate over for bucket and volume level search - matchedKeys.putAll(retrieveKeysFromOpenFileTable(startPrefixObjectPath, - limit - matchedKeys.size())); - return matchedKeys; - } - - - private Map retrieveKeysFromOpenFileTable(String subPath, - int limit) - throws IOException { - - Map matchedKeys = - new LinkedHashMap<>(); // Preserves the insertion order - Table openFileTable = - omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - - try ( - TableIterator> keyIter = - openFileTable.iterator()) { - keyIter.seek(subPath); - while (keyIter.hasNext() && matchedKeys.size() < limit) { - Table.KeyValue entry = keyIter.next(); - String dbKey = entry.getKey(); // Get the DB key - if (!dbKey.startsWith(subPath)) { - break; // Exit the loop if the key no longer matches the prefix - } - // Add the DB key and OmKeyInfo object to the map - matchedKeys.put(dbKey, entry.getValue()); - } - } catch (IOException exception) { - createInternalServerErrorResponse( - "Error retrieving keys from openFileTable for path: " + subPath); - } + matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit)); return matchedKeys; } - /** * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. 
These sub-directories are then used @@ -391,6 +310,58 @@ public String convertToObjectPath(String prevKeyPrefix) { } } + /** + * Common method to retrieve keys from a table based on a search prefix and a limit. + * + * @param table The table to retrieve keys from. + * @param startPrefix The search prefix to match keys against. + * @param limit The maximum number of keys to retrieve. + * @return A map of keys and their corresponding OmKeyInfo objects. + * @throws IOException If there are problems accessing the table. + */ + private Map retrieveKeysFromTable( + Table table, String startPrefix, int limit) + throws IOException { + Map matchedKeys = new LinkedHashMap<>(); + try ( + TableIterator> keyIter = table.iterator()) { + keyIter.seek(startPrefix); + while (keyIter.hasNext() && matchedKeys.size() < limit) { + Table.KeyValue entry = keyIter.next(); + String dbKey = entry.getKey(); + if (!dbKey.startsWith(startPrefix)) { + break; // Exit the loop if the key no longer matches the prefix + } + matchedKeys.put(dbKey, entry.getValue()); + } + } catch (IOException exception) { + LOG.error("Error retrieving keys from table for path: {}", startPrefix, + exception); + throw exception; + } + return matchedKeys; + } + + /** + * Creates a KeyEntityInfo object from an OmKeyInfo object and the corresponding key. + * + * @param dbKey The key in the database corresponding to the OmKeyInfo object. + * @param keyInfo The OmKeyInfo object to create the KeyEntityInfo from. + * @return The KeyEntityInfo object created from the OmKeyInfo object and the key. 
+ */ + private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, + OmKeyInfo keyInfo) { + KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setKey(dbKey); // Set the DB key + keyEntityInfo.setPath( + keyInfo.getKeyName()); // Assuming path is the same as key name + keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); + keyEntityInfo.setSize(keyInfo.getDataSize()); + keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); + keyEntityInfo.setReplicationConfig(keyInfo.getReplicationConfig()); + return keyEntityInfo; + } + /** * Constructs an object path with the given IDs. * diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 66dd44301a10..fa2ac2ae5a13 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -259,19 +259,19 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { @Test public void testSearchOpenKeysWithBadRequest() throws IOException { - // Given a search prefix that is empty - String searchPrefix = ""; - + // Give a negative limit + int negativeLimit = -1; Response response = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, true, true, 10); + omdbInsightSearchEndpoint.searchOpenKeys("searchPrefix", true, true, + negativeLimit); // Then the response should indicate that the request was bad assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus(), "Expected a 400 BAD REQUEST status"); String entity = (String) response.getEntity(); - assertTrue(entity.contains("The startPrefix query parameter is required."), - "Expected a message indicating the search prefix cannot be empty"); + assertTrue(entity.contains("Limit cannot be negative."), 
+ "Expected a message indicating the limit was negative"); } From 57ccc8e15b2bc3edfa60c4b69a2f8a9d9ffa883a Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 20 Mar 2024 13:29:29 +0530 Subject: [PATCH 14/32] Made review comments --- .../hadoop/ozone/recon/ReconConstants.java | 4 +- .../recon/api/OMDBInsightSearchEndpoint.java | 127 +++++++++++------- .../recon/api/handlers/EntityHandler.java | 2 +- .../api/TestOMDBInsightSearchEndpoint.java | 23 ++-- 4 files changed, 89 insertions(+), 67 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index c084e81a4d24..bc63437359d0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -38,14 +38,14 @@ private ReconConstants() { // By default, limit the number of results returned public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false"; public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false"; - public static final String DEFAULT_SEARCH_PREFIX = "/"; + public static final String DEFAULT_START_PREFIX = "/"; public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_BATCH_NUMBER = "1"; public static final String RECON_QUERY_BATCH_PARAM = "batchNum"; public static final String RECON_QUERY_PREVKEY = "prevKey"; public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; - public static final String RECON_OPEN_KEY_SEARCH_LIMIT = "10"; + public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "30"; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 2e6acf28eb5c..e062b3f38e5e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -18,21 +18,18 @@ package org.apache.hadoop.ozone.recon.api; -import org.antlr.v4.runtime.misc.Pair; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,7 +58,7 @@ /** * REST endpoint for search implementation in OM DB Insight. */ -@Path("/insights") +@Path("/keys") @Produces(MediaType.APPLICATION_JSON) @AdminOnly public class OMDBInsightSearchEndpoint { @@ -85,7 +82,7 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, /** * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix. 
- * This endpoint can search across both File System Optimized (FSO) and Object Store (non-FSO) layouts, + * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts, * compiling a list of keys that match the given prefix along with their data sizes. * * The search prefix may range from the root level ('/') to any specific directory @@ -97,73 +94,63 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'. * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. * - * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path. - * @param includeFso Indicates whether to include FSO layout keys in the search. - * @param includeNonFso Indicates whether to include non-FSO layout keys in the search. - * @param limit Limits the number of returned keys. + * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path. + * @param limit Limits the number of returned keys. * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. * @throws IOException On failure to access the OM database or process the operation. 
*/ @GET - @Path("/openKeys/search") + @Path("/open/search") public Response searchOpenKeys( - @DefaultValue(DEFAULT_SEARCH_PREFIX) @QueryParam("startPrefix") + @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix") String startPrefix, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_FSO) @QueryParam("includeFso") - boolean includeFso, - @DefaultValue(DEFAULT_OPEN_KEY_INCLUDE_NON_FSO) @QueryParam("includeNonFso") - boolean includeNonFso, - @DefaultValue(RECON_OPEN_KEY_SEARCH_LIMIT) @QueryParam("limit") + @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") int limit) throws IOException { - if (limit < 0) { - return createBadRequestResponse("Limit cannot be negative."); - } - - KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); - long replicatedTotal = 0; - long unreplicatedTotal = 0; - boolean keysFound = false; // Flag to track if any keys are found - - // Fetch keys from OBS layout and convert them into KeyEntityInfo objects - Map obsKeys = new LinkedHashMap<>(); - if (includeNonFso) { + try { + limit = Math.max(0, limit); // Ensure limit is non-negative + KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); + long replicatedTotal = 0; + long unreplicatedTotal = 0; + boolean keysFound = false; // Flag to track if any keys are found + + // Search keys from non-FSO layout. 
+ Map obsKeys = new LinkedHashMap<>(); Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getNonFSOKeyInfoList() - .add(keyEntityInfo); // Add to non-FSO list + insightResponse.getNonFSOKeyInfoList().add(keyEntityInfo); // Add to non-FSO list replicatedTotal += entry.getValue().getReplicatedSize(); unreplicatedTotal += entry.getValue().getDataSize(); } - } - // Fetch keys from FSO layout, if the limit is not yet reached - if (includeFso) { + // Search keys from FSO layout. Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit); for (Map.Entry entry : fsoKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getFsoKeyInfoList() - .add(keyEntityInfo); // Add to FSO list + insightResponse.getFsoKeyInfoList().add(keyEntityInfo); // Add to FSO list replicatedTotal += entry.getValue().getReplicatedSize(); unreplicatedTotal += entry.getValue().getDataSize(); } - } - // If no keys were found, return a response indicating that no keys matched - if (!keysFound) { - return noMatchedKeysResponse(startPrefix); - } + // If no keys were found, return a response indicating that no keys matched + if (!keysFound) { + return noMatchedKeysResponse(startPrefix); + } - // Set the aggregated totals in the response - insightResponse.setReplicatedDataSize(replicatedTotal); - insightResponse.setUnreplicatedDataSize(unreplicatedTotal); + // Set the aggregated totals in the response + insightResponse.setReplicatedDataSize(replicatedTotal); + insightResponse.setUnreplicatedDataSize(unreplicatedTotal); - return Response.ok(insightResponse).build(); + return Response.ok(insightResponse).build(); + } catch (IOException e) { + return 
createInternalServerErrorResponse( + "Error searching open keys in OM DB: " + e.getMessage()); + } } public Map searchOpenKeysInFSO(String startPrefix, @@ -190,7 +177,7 @@ public Map searchOpenKeysInFSO(String startPrefix, } List subPaths = new ArrayList<>(); // Add the initial search prefix object path because it can have both openFiles - // and sub-directories with openFiles + // and subdirectories with openFiles subPaths.add(startPrefixObjectPath); // Recursively gather all subpaths @@ -270,14 +257,15 @@ private void gatherSubPaths(long parentId, List subPaths, * @throws IOException If database access fails. */ public String convertToObjectPath(String prevKeyPrefix) { - if (prevKeyPrefix.isEmpty()) { - return ""; - } - try { String[] names = parseRequestPath(normalizePath(prevKeyPrefix)); - // Fetch the volumeID + // Root-Level :- Return the original path + if (names.length == 0) { + return prevKeyPrefix; + } + + // Volume-Level :- Fetch the volumeID String volumeName = names[0]; String volumeKey = omMetadataManager.getVolumeKey(volumeName); long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); @@ -285,7 +273,7 @@ public String convertToObjectPath(String prevKeyPrefix) { return constructObjectPathWithPrefix(volumeId); } - // Fetch the bucketID + // Bucket-Level :- Fetch the bucketID String bucketName = names[1]; String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo bucketInfo = @@ -376,6 +364,41 @@ private String constructObjectPathWithPrefix(long... ids) { return pathBuilder.toString(); } + /** + * Validates volume or bucket names according to specific rules. + * + * @param resName The name to validate (volume or bucket). + * @return A Response object if validation fails, or null if the name is valid. 
+ */ + public Response validateNames(String resName) { + if (resName == null) { + return createBadRequestResponse("Volume or Bucket name cannot be null"); + } + + if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH || + resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) { + return createBadRequestResponse( + "Bucket or Volume name must be between 3 and 63 characters"); + } + + if (resName.charAt(0) == '.' || resName.charAt(0) == '-' || + resName.charAt(resName.length() - 1) == '.' || + resName.charAt(resName.length() - 1) == '-') { + return createBadRequestResponse( + "Bucket or Volume name cannot start or end with a period or dash"); + } + + // Regex to check for lowercase letters, numbers, hyphens, underscores, and periods only. + if (!resName.matches("^[a-z0-9._-]+$")) { + return createBadRequestResponse( + "Bucket or Volume name can only include lowercase letters, numbers," + + " hyphens, underscores, and periods"); + } + + // If all checks pass, the name is valid + return null; + } + /** * Returns a response indicating that no keys matched the search prefix. 
* diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index 4ed5a11329c8..734008de8f3b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -248,7 +248,7 @@ protected long getTotalSize(long objectId) throws IOException { return totalSize; } - public static String[] parseRequestPath(String path) { + public static String[] parseRequestPath(String path) { // path = /abcd if (path.startsWith(OM_KEY_PREFIX)) { path = path.substring(1); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index fa2ac2ae5a13..1f5027de0f2d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -144,7 +144,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( @Test public void testRootLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, true, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -156,14 +156,14 @@ public void testRootLevelSearch() throws IOException { // Switch of the include Fso flag response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, false, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 20); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(0, 
result.getFsoKeyInfoList().size()); assertEquals(5, result.getNonFSOKeyInfoList().size()); // Switch of the include Non Fso flag response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, true, false, 20); + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 20); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(10, result.getFsoKeyInfoList().size()); } @@ -171,7 +171,7 @@ public void testRootLevelSearch() throws IOException { @Test public void testBucketLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1", true, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1",20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -182,7 +182,7 @@ public void testBucketLevelSearch() throws IOException { assertEquals(5000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/volB/bucketB1", true, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys("/volB/bucketB1", 20); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -196,7 +196,7 @@ public void testBucketLevelSearch() throws IOException { @Test public void testDirectoryLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA1", true, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA1", 20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -207,7 +207,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA2", true, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA2", 20); assertEquals(200, response.getStatus()); result = 
(KeyInsightInfoResponse) response.getEntity(); @@ -218,7 +218,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA3", true, true, 20); + omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA3", 20); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -232,7 +232,7 @@ public void testDirectoryLevelSearch() throws IOException { @Test public void testLimitSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, true, true, 5); + omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 5); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -246,7 +246,7 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { String searchPrefix = "nonexistentKeyPrefix"; Response response = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, true, true, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, 10); // Then the response should indicate that no keys were found assertEquals(Response.Status.NOT_FOUND.getStatusCode(), @@ -262,8 +262,7 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { // Give a negative limit int negativeLimit = -1; Response response = - omdbInsightSearchEndpoint.searchOpenKeys("searchPrefix", true, true, - negativeLimit); + omdbInsightSearchEndpoint.searchOpenKeys("searchPrefix", negativeLimit); // Then the response should indicate that the request was bad assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), From 132ed900398fbf1d554af5a4f66bb4558d77ae8d Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 20 Mar 2024 13:45:49 +0530 Subject: [PATCH 15/32] Adjusted the exception handling --- .../recon/api/OMDBInsightSearchEndpoint.java | 74 ++++++++++--------- 1 file changed, 40 insertions(+), 34 
deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index e062b3f38e5e..df645ee890a7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -84,11 +84,11 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix. * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts, * compiling a list of keys that match the given prefix along with their data sizes. - * + *

* The search prefix may range from the root level ('/') to any specific directory * or key level (e.g., '/volA/' for everything under 'volA'). The search operation matches * the prefix against the start of keys' names within the OM DB. - * + *

* Example Usage: * 1. A startPrefix of "/" will return all keys in the database. * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'. @@ -115,13 +115,15 @@ public Response searchOpenKeys( // Search keys from non-FSO layout. Map obsKeys = new LinkedHashMap<>(); - Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); + Table openKeyTable = + omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getNonFSOKeyInfoList().add(keyEntityInfo); // Add to non-FSO list + insightResponse.getNonFSOKeyInfoList() + .add(keyEntityInfo); // Add to non-FSO list replicatedTotal += entry.getValue().getReplicatedSize(); unreplicatedTotal += entry.getValue().getDataSize(); } @@ -132,7 +134,8 @@ public Response searchOpenKeys( keysFound = true; KeyEntityInfo keyEntityInfo = createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue()); - insightResponse.getFsoKeyInfoList().add(keyEntityInfo); // Add to FSO list + insightResponse.getFsoKeyInfoList() + .add(keyEntityInfo); // Add to FSO list replicatedTotal += entry.getValue().getReplicatedSize(); unreplicatedTotal += entry.getValue().getDataSize(); } @@ -150,17 +153,21 @@ public Response searchOpenKeys( } catch (IOException e) { return createInternalServerErrorResponse( "Error searching open keys in OM DB: " + e.getMessage()); + } catch (IllegalArgumentException e) { + return createBadRequestResponse( + "Invalid startPrefix: " + e.getMessage()); } } public Map searchOpenKeysInFSO(String startPrefix, int limit) - throws IOException { + throws IOException, IllegalArgumentException { Map matchedKeys = new LinkedHashMap<>(); // Convert the search prefix to an object path for FSO buckets String startPrefixObjectPath = convertToObjectPath(startPrefix); 
String[] names = parseRequestPath(startPrefixObjectPath); - Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table openFileTable = + omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); // If names.length > 2, then the search prefix is at the volume or bucket level hence // no need to find parent or extract id's or find subpaths as the openFileTable is @@ -186,7 +193,8 @@ public Map searchOpenKeysInFSO(String startPrefix, // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { matchedKeys.putAll( - retrieveKeysFromTable(openFileTable, subPath, limit - matchedKeys.size())); + retrieveKeysFromTable(openFileTable, subPath, + limit - matchedKeys.size())); if (matchedKeys.size() >= limit) { break; } @@ -195,7 +203,8 @@ public Map searchOpenKeysInFSO(String startPrefix, } // Iterate over for bucket and volume level search - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit)); + matchedKeys.putAll( + retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit)); return matchedKeys; } @@ -203,7 +212,7 @@ public Map searchOpenKeysInFSO(String startPrefix, * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. These sub-directories are then used * to search for open files in the openFileTable. - * + *

* How it works: * - Starts from a parent directory identified by parentId. * - Looks through all child directories of this parent. @@ -256,8 +265,9 @@ private void gatherSubPaths(long parentId, List subPaths, * @return The object path as "/volumeID/bucketID/ParentId/". * @throws IOException If database access fails. */ - public String convertToObjectPath(String prevKeyPrefix) { - try { + public String convertToObjectPath(String prevKeyPrefix) + throws IOException, IllegalArgumentException { + String[] names = parseRequestPath(normalizePath(prevKeyPrefix)); // Root-Level :- Return the original path @@ -267,14 +277,17 @@ public String convertToObjectPath(String prevKeyPrefix) { // Volume-Level :- Fetch the volumeID String volumeName = names[0]; + validateNames(volumeName); String volumeKey = omMetadataManager.getVolumeKey(volumeName); - long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); + long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey) + .getObjectID(); if (names.length == 1) { return constructObjectPathWithPrefix(volumeId); } // Bucket-Level :- Fetch the bucketID String bucketName = names[1]; + validateNames(bucketName); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); @@ -285,17 +298,10 @@ public String convertToObjectPath(String prevKeyPrefix) { // Fetch the immediate parentID which could be a directory or the bucket itself BucketHandler handler = - getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); + getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, + reconSCM, bucketInfo); long dirObjectId = handler.getDirInfo(names).getObjectID(); return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId); - - } catch (IOException e) { - LOG.error("Error converting key prefix to object path: {}", prevKeyPrefix, e); - return prevKeyPrefix; 
// Fallback to original prefix in case of exception - } catch (Exception e) { - LOG.error("Unexpected error during conversion: {}", prevKeyPrefix, e); - return prevKeyPrefix; - } } /** @@ -370,29 +376,29 @@ private String constructObjectPathWithPrefix(long... ids) { * @param resName The name to validate (volume or bucket). * @return A Response object if validation fails, or null if the name is valid. */ - public Response validateNames(String resName) { - if (resName == null) { - return createBadRequestResponse("Volume or Bucket name cannot be null"); - } - + public Response validateNames(String resName) + throws IllegalArgumentException { if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH || resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) { - return createBadRequestResponse( - "Bucket or Volume name must be between 3 and 63 characters"); + throw new IllegalArgumentException( + "Bucket or Volume name length should be between " + + OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH + " and " + + OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH); } if (resName.charAt(0) == '.' || resName.charAt(0) == '-' || resName.charAt(resName.length() - 1) == '.' || resName.charAt(resName.length() - 1) == '-') { - return createBadRequestResponse( - "Bucket or Volume name cannot start or end with a period or dash"); + throw new IllegalArgumentException( + "Bucket or Volume name cannot start or end with " + + "hyphen or period"); } // Regex to check for lowercase letters, numbers, hyphens, underscores, and periods only. 
if (!resName.matches("^[a-z0-9._-]+$")) { - return createBadRequestResponse( - "Bucket or Volume name can only include lowercase letters, numbers," + - " hyphens, underscores, and periods"); + throw new IllegalArgumentException( + "Bucket or Volume name can only contain lowercase " + + "letters, numbers, hyphens, underscores, and periods"); } // If all checks pass, the name is valid From fee245ed05ded5a53d6d0710cfca7e774267afd4 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 6 May 2024 17:28:50 +0530 Subject: [PATCH 16/32] Removed the volumeID and bucketID initialization from inside the loop to the outside --- .../java/org/apache/hadoop/ozone/recon/ReconConstants.java | 2 +- .../hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 1218bd4308cb..359eb1b6fd5c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -50,7 +50,7 @@ private ReconConstants() { public static final String RECON_QUERY_PREVKEY = "prevKey"; public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; - public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "30"; + public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "1000"; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 8c068618ed24..bc9b5ab8d170 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -234,15 +234,14 @@ private void gatherSubPaths(long parentId, List subPaths, if (parentSummary == null) { return; } - + long volumeID = Long.parseLong(names[0]); + long bucketID = Long.parseLong(names[1]); Set childDirIds = parentSummary.getChildDir(); for (Long childId : childDirIds) { // Fetch the NSSummary for each child directory NSSummary childSummary = reconNamespaceSummaryManager.getNSSummary(childId); if (childSummary != null) { - long volumeID = Long.parseLong(names[0]); - long bucketID = Long.parseLong(names[1]); String subPath = constructObjectPathWithPrefix(volumeID, bucketID, childId); // Add to subPaths From 9aa436fff0d8b85dd79d33b8f332d3c7cdebf0ed Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 19 May 2024 19:34:48 +0530 Subject: [PATCH 17/32] Fixed the test cases and added new ones --- .../recon/api/OMDBInsightSearchEndpoint.java | 23 +- .../api/TestOMDBInsightSearchEndpoint.java | 216 +++++++++--------- 2 files changed, 118 insertions(+), 121 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index bc9b5ab8d170..4e244fba48aa 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -261,14 +261,12 @@ private void gatherSubPaths(long parentId, List subPaths, * with their corresponding IDs. It simplifies database queries for FSO bucket operations. 
* * @param prevKeyPrefix The path to be converted, not including key or directory names/IDs. - * @return The object path as "/volumeID/bucketID/ParentId/". + * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. * @throws IOException If database access fails. */ - public String convertToObjectPath(String prevKeyPrefix) - throws IOException, IllegalArgumentException { - - String[] names = parseRequestPath( - normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); + public String convertToObjectPath(String prevKeyPrefix) throws IOException, IllegalArgumentException { + try { + String[] names = parseRequestPath(normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); // Root-Level :- Return the original path if (names.length == 0) { @@ -279,8 +277,7 @@ public String convertToObjectPath(String prevKeyPrefix) String volumeName = names[0]; validateNames(volumeName); String volumeKey = omMetadataManager.getVolumeKey(volumeName); - long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey) - .getObjectID(); + long volumeId = omMetadataManager.getVolumeTable().getSkipCache(volumeKey).getObjectID(); if (names.length == 1) { return constructObjectPathWithPrefix(volumeId); } @@ -289,19 +286,19 @@ public String convertToObjectPath(String prevKeyPrefix) String bucketName = names[1]; validateNames(bucketName); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo bucketInfo = - omMetadataManager.getBucketTable().getSkipCache(bucketKey); + OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); long bucketId = bucketInfo.getObjectID(); if (names.length == 2) { return constructObjectPathWithPrefix(volumeId, bucketId); } // Fetch the immediate parentID which could be a directory or the bucket itself - BucketHandler handler = - getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, - reconSCM, bucketInfo); + BucketHandler 
handler = getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); long dirObjectId = handler.getDirInfo(names).getObjectID(); return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId); + } catch (NullPointerException e) { + return prevKeyPrefix; + } } /** diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 1f5027de0f2d..5b76ca34c8a9 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -142,36 +142,36 @@ private static OMMetadataManager initializeNewOmMetadataManager( } @Test - public void testRootLevelSearch() throws IOException { + public void testVolumeLevelSearch() throws IOException { + String volumePath = "/vola"; Response response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 20); + omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(10, result.getFsoKeyInfoList().size()); - assertEquals(5, result.getNonFSOKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); // Assert Total Size - assertEquals(15000, result.getUnreplicatedDataSize()); - assertEquals(15000 * 3, result.getReplicatedDataSize()); + assertEquals(10000, result.getUnreplicatedDataSize()); + assertEquals(10000 * 3, result.getReplicatedDataSize()); - // Switch of the include Fso flag - response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 20); + // Test the same for volumeB. 
+ volumePath = "/volb"; + response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); + assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(0, result.getFsoKeyInfoList().size()); assertEquals(5, result.getNonFSOKeyInfoList().size()); - - // Switch of the include Non Fso flag - response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 20); - result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(10, result.getFsoKeyInfoList().size()); + // Assert Total Size + assertEquals(5000, result.getUnreplicatedDataSize()); + assertEquals(5000 * 3, result.getReplicatedDataSize()); } + @Test public void testBucketLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1",20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1",20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -182,7 +182,7 @@ public void testBucketLevelSearch() throws IOException { assertEquals(5000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/volB/bucketB1", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -196,7 +196,7 @@ public void testBucketLevelSearch() throws IOException { @Test public void testDirectoryLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA1", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1", 20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -207,7 +207,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - 
omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA2", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2", 20); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -218,7 +218,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/volA/bucketA1/dirA3", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -232,18 +232,18 @@ public void testDirectoryLevelSearch() throws IOException { @Test public void testLimitSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys(ROOT_PATH, 5); + omdbInsightSearchEndpoint.searchOpenKeys("/vola", 5); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(5, result.getFsoKeyInfoList().size()); - assertEquals(5, result.getNonFSOKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); } @Test public void testSearchOpenKeysWithNoMatchFound() throws IOException { // Given a search prefix that matches no keys - String searchPrefix = "nonexistentKeyPrefix"; + String searchPrefix = "incorrectprefix"; Response response = omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, 10); @@ -261,16 +261,16 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { public void testSearchOpenKeysWithBadRequest() throws IOException { // Give a negative limit int negativeLimit = -1; - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("searchPrefix", negativeLimit); + Response response = omdbInsightSearchEndpoint.searchOpenKeys("@323232", negativeLimit); // Then the response should indicate that the request was bad assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), 
response.getStatus(), "Expected a 400 BAD REQUEST status"); String entity = (String) response.getEntity(); - assertTrue(entity.contains("Limit cannot be negative."), - "Expected a message indicating the limit was negative"); + assertTrue(entity.contains("Invalid startPrefix: Bucket or Volume name can " + + "only contain lowercase letters, numbers, hyphens, underscores, and periods"), + "Expected a message indicating Invalid startPrefix"); } @@ -279,101 +279,101 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { * The test setup mimics the following filesystem structure with specified sizes: * * root (Total Size: 15000KB) - * ├── volA (Total Size: 10000KB) - * │ ├── bucketA1 (FSO) Total Size: 5000KB - * │ │ ├── fileA1 (Size: 1000KB) - * │ │ ├── fileA2 (Size: 1000KB) - * │ │ ├── dirA1 (Total Size: 1000KB) - * │ │ ├── dirA2 (Total Size: 1000KB) - * │ │ └── dirA3 (Total Size: 1000KB) - * │ ├── bucketA2 (FSO) Total Size: 5000KB - * │ │ ├── fileA3 (Size: 1000KB) - * │ │ ├── fileA4 (Size: 1000KB) - * │ │ ├── dirA4 (Total Size: 1000KB) - * │ │ ├── dirA5 (Total Size: 1000KB) - * │ │ └── dirA6 (Total Size: 1000KB) - * └── volB (Total Size: 5000KB) - * └── bucketB1 (OBS) Total Size: 5000KB - * ├── fileB1 (Size: 1000KB) - * ├── fileB2 (Size: 1000KB) - * ├── fileB3 (Size: 1000KB) - * ├── fileB4 (Size: 1000KB) - * └── fileB5 (Size: 1000KB) + * ├── vola (Total Size: 10000KB) + * │ ├── bucketa1 (FSO) Total Size: 5000KB + * │ │ ├── filea1 (Size: 1000KB) + * │ │ ├── filea2 (Size: 1000KB) + * │ │ ├── dira1 (Total Size: 1000KB) + * │ │ ├── dira2 (Total Size: 1000KB) + * │ │ └── dira3 (Total Size: 1000KB) + * │ ├── bucketa2 (FSO) Total Size: 5000KB + * │ │ ├── filea3 (Size: 1000KB) + * │ │ ├── filea4 (Size: 1000KB) + * │ │ ├── dira4 (Total Size: 1000KB) + * │ │ ├── dira5 (Total Size: 1000KB) + * │ │ └── dira6 (Total Size: 1000KB) + * └── volb (Total Size: 5000KB) + * └── bucketb1 (OBS) Total Size: 5000KB + * ├── fileb1 (Size: 1000KB) + * ├── fileb2 (Size: 1000KB) + 
* ├── fileb3 (Size: 1000KB) + * ├── fileb4 (Size: 1000KB) + * └── fileb5 (Size: 1000KB) * * @throws Exception */ private void populateOMDB() throws Exception { // Create Volumes - long volAObjectId = createVolume("volA"); - long volBObjectId = createVolume("volB"); + long volaObjectId = createVolume("vola"); + long volbObjectId = createVolume("volb"); - // Create Buckets in volA - long bucketA1ObjectId = - createBucket("volA", "bucketA1", 1000 + 1000 + 1000 + 1000 + 1000, + // Create Buckets in vola + long bucketa1ObjectId = + createBucket("vola", "bucketa1", 1000 + 1000 + 1000 + 1000 + 1000, getFSOBucketLayout()); - long bucketA2ObjectId = - createBucket("volA", "bucketA2", 1000 + 1000 + 1000 + 1000 + 1000, + long bucketa2ObjectId = + createBucket("vola", "bucketa2", 1000 + 1000 + 1000 + 1000 + 1000, getFSOBucketLayout()); - // Create Bucket in volB - long bucketB1ObjectId = - createBucket("volB", "bucketB1", 1000 + 1000 + 1000 + 1000 + 1000, + // Create Bucket in volb + long bucketb1ObjectId = + createBucket("volb", "bucketb1", 1000 + 1000 + 1000 + 1000 + 1000, getOBSBucketLayout()); - // Create Directories and Files under bucketA1 - long dirA1ObjectId = - createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, - "dirA1"); - long dirA2ObjectId = - createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, - "dirA2"); - long dirA3ObjectId = - createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId, - "dirA3"); - - // Files directly under bucketA1 - createOpenFile("fileA1", "bucketA1", "volA", "fileA1", bucketA1ObjectId, - bucketA1ObjectId, volAObjectId, 1000); - createOpenFile("fileA2", "bucketA1", "volA", "fileA2", bucketA1ObjectId, - bucketA1ObjectId, volAObjectId, 1000); - - // Create Directories and Files under bucketA2 - long dirA4ObjectId = - createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, - "dirA4"); - long dirA5ObjectId = - createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, - "dirA5"); - long 
dirA6ObjectId = - createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId, - "dirA6"); - - // Files directly under bucketA2 - createOpenFile("fileA3", "bucketA2", "volA", "fileA3", bucketA2ObjectId, - bucketA2ObjectId, volAObjectId, 1000); - createOpenFile("fileA4", "bucketA2", "volA", "fileA4", bucketA2ObjectId, - bucketA2ObjectId, volAObjectId, 1000); - - // Files directly under bucketB1 - createOpenKey("fileB1", "bucketB1", "volB", 1000); - createOpenKey("fileB2", "bucketB1", "volB", 1000); - createOpenKey("fileB3", "bucketB1", "volB", 1000); - createOpenKey("fileB4", "bucketB1", "volB", 1000); - createOpenKey("fileB5", "bucketB1", "volB", 1000); + // Create Directories and Files under bucketa1 + long dira1ObjectId = + createDirectory(bucketa1ObjectId, bucketa1ObjectId, volaObjectId, + "dira1"); + long dira2ObjectId = + createDirectory(bucketa1ObjectId, bucketa1ObjectId, volaObjectId, + "dira2"); + long dira3ObjectId = + createDirectory(bucketa1ObjectId, bucketa1ObjectId, volaObjectId, + "dira3"); + + // Files directly under bucketa1 + createOpenFile("filea1", "bucketa1", "vola", "filea1", bucketa1ObjectId, + bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("filea2", "bucketa1", "vola", "filea2", bucketa1ObjectId, + bucketa1ObjectId, volaObjectId, 1000); + + // Create Directories and Files under bucketa2 + long dira4ObjectId = + createDirectory(bucketa2ObjectId, bucketa2ObjectId, volaObjectId, + "dira4"); + long dira5ObjectId = + createDirectory(bucketa2ObjectId, bucketa2ObjectId, volaObjectId, + "dira5"); + long dira6ObjectId = + createDirectory(bucketa2ObjectId, bucketa2ObjectId, volaObjectId, + "dira6"); + + // Files directly under bucketa2 + createOpenFile("filea3", "bucketa2", "vola", "filea3", bucketa2ObjectId, + bucketa2ObjectId, volaObjectId, 1000); + createOpenFile("filea4", "bucketa2", "vola", "filea4", bucketa2ObjectId, + bucketa2ObjectId, volaObjectId, 1000); + + // Files directly under bucketb1 + createOpenKey("fileb1", "bucketb1", 
"volb", 1000); + createOpenKey("fileb2", "bucketb1", "volb", 1000); + createOpenKey("fileb3", "bucketb1", "volb", 1000); + createOpenKey("fileb4", "bucketb1", "volb", 1000); + createOpenKey("fileb5", "bucketb1", "volb", 1000); // Create Inner files under directories - createOpenFile("dirA1/innerFile", "bucketA1", "volA", "innerFile", - dirA1ObjectId, bucketA1ObjectId, volAObjectId, 1000); - createOpenFile("dirA2/innerFile", "bucketA1", "volA", "innerFile", - dirA2ObjectId, bucketA1ObjectId, volAObjectId, 1000); - createOpenFile("dirA3/innerFile", "bucketA1", "volA", "innerFile", - dirA3ObjectId, bucketA1ObjectId, volAObjectId, 1000); - createOpenFile("dirA4/innerFile", "bucketA2", "volA", "innerFile", - dirA4ObjectId, bucketA2ObjectId, volAObjectId, 1000); - createOpenFile("dirA5/innerFile", "bucketA2", "volA", "innerFile", - dirA5ObjectId, bucketA2ObjectId, volAObjectId, 1000); - createOpenFile("dirA6/innerFile", "bucketA2", "volA", "innerFile", - dirA6ObjectId, bucketA2ObjectId, volAObjectId, 1000); + createOpenFile("dira1/innerfile", "bucketa1", "vola", "innerfile", + dira1ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira2/innerfile", "bucketa1", "vola", "innerfile", + dira2ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/innerfile", "bucketa1", "vola", "innerfile", + dira3ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira4/innerfile", "bucketa2", "vola", "innerfile", + dira4ObjectId, bucketa2ObjectId, volaObjectId, 1000); + createOpenFile("dira5/innerfile", "bucketa2", "vola", "innerfile", + dira5ObjectId, bucketa2ObjectId, volaObjectId, 1000); + createOpenFile("dira6/innerfile", "bucketa2", "vola", "innerfile", + dira6ObjectId, bucketa2ObjectId, volaObjectId, 1000); } /** From 47d9e7e24a64180b419aeb3c5c4dd63febc74fe9 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 19 May 2024 20:05:44 +0530 Subject: [PATCH 18/32] Did some refactoring --- .../ozone/recon/ReconResponseUtils.java | 84 
++++++++++++++ .../apache/hadoop/ozone/recon/ReconUtils.java | 54 +++++++++ .../recon/api/OMDBInsightSearchEndpoint.java | 105 +----------------- 3 files changed, 144 insertions(+), 99 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java new file mode 100644 index 000000000000..a5dde93ffd53 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon; + +import com.google.inject.Singleton; + +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +/** + * Recon API Response Utility class. + */ +@Singleton +public final class ReconResponseUtils { + + // Declared a private constructor to avoid checkstyle issues. + private ReconResponseUtils() { + + } + + /** + * Returns a response indicating that no keys matched the search prefix. + * + * @param startPrefix The search prefix that was used. + * @return The response indicating that no keys matched the search prefix. + */ + public static Response noMatchedKeysResponse(String startPrefix) { + String jsonResponse = String.format( + "{\"message\": \"No keys matched the search prefix: '%s'.\"}", + startPrefix); + return Response.status(Response.Status.NOT_FOUND) + .entity(jsonResponse) + .type(MediaType.APPLICATION_JSON) + .build(); + } + + /** + * Utility method to create a bad request response with a custom message. + * Which means the request sent by the client to the server is incorrect + * or malformed and cannot be processed by the server. + * + * @param message The message to include in the response body. + * @return A Response object configured with the provided message. + */ + public static Response createBadRequestResponse(String message) { + String jsonResponse = String.format("{\"message\": \"%s\"}", message); + return Response.status(Response.Status.BAD_REQUEST) + .entity(jsonResponse) + .type(MediaType.APPLICATION_JSON) + .build(); + } + + /** + * Utility method to create an internal server error response with a custom message. 
+ * Which means the server encountered an unexpected condition that prevented it + * from fulfilling the request. + * + * @param message The message to include in the response body. + * @return A Response object configured with the provided message. + */ + public static Response createInternalServerErrorResponse(String message) { + String jsonResponse = String.format("{\"message\": \"%s\"}", message); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity(jsonResponse) + .type(MediaType.APPLICATION_JSON) + .build(); + } +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index f154f024fbda..7ee21efe81a1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -55,11 +55,13 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_THREAD_POOL_SIZE_DEFAULT; import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; import static org.jooq.impl.DSL.currentTimestamp; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.recon.api.types.DUResponse; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; import org.apache.hadoop.security.authentication.client.AuthenticationException; @@ -70,6 +72,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.ws.rs.core.Response; + /** * Recon Utility class. 
*/ @@ -324,6 +328,56 @@ public static void upsertGlobalStatsTable(Configuration sqlConfiguration, } } + /** + * Validates volume or bucket names according to specific rules. + * + * @param resName The name to validate (volume or bucket). + * @return A Response object if validation fails, or null if the name is valid. + */ + public static Response validateNames(String resName) + throws IllegalArgumentException { + if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH || + resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) { + throw new IllegalArgumentException( + "Bucket or Volume name length should be between " + + OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH + " and " + + OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH); + } + + if (resName.charAt(0) == '.' || resName.charAt(0) == '-' || + resName.charAt(resName.length() - 1) == '.' || + resName.charAt(resName.length() - 1) == '-') { + throw new IllegalArgumentException( + "Bucket or Volume name cannot start or end with " + + "hyphen or period"); + } + + // Regex to check for lowercase letters, numbers, hyphens, underscores, and periods only. + if (!resName.matches("^[a-z0-9._-]+$")) { + throw new IllegalArgumentException( + "Bucket or Volume name can only contain lowercase " + + "letters, numbers, hyphens, underscores, and periods"); + } + + // If all checks pass, the name is valid + return null; + } + + /** + * Constructs an object path with the given IDs. + * + * @param ids The IDs to construct the object path with. + * @return The constructed object path. + */ + public static String constructObjectPathWithPrefix(long... ids) { + StringBuilder pathBuilder = new StringBuilder(); + for (long id : ids) { + pathBuilder.append(OM_KEY_PREFIX).append(id); + } + return pathBuilder.toString(); + } + + /** * Sorts a list of DiskUsage objects in descending order by size using parallel sorting and * returns the top N records as specified by the limit. 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 4e244fba48aa..fb6061f2d8b4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -25,6 +25,8 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.recon.ReconResponseUtils; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; @@ -51,6 +53,9 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.*; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.*; +import static org.apache.hadoop.ozone.recon.ReconUtils.constructObjectPathWithPrefix; +import static org.apache.hadoop.ozone.recon.ReconUtils.validateNames; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath; import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath; @@ -203,8 +208,7 @@ public Map searchOpenKeysInFSO(String startPrefix, } // Iterate over for bucket and volume level search - matchedKeys.putAll( - retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit)); + matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit)); return matchedKeys; } @@ -353,101 +357,4 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String 
dbKey, return keyEntityInfo; } - /** - * Constructs an object path with the given IDs. - * - * @param ids The IDs to construct the object path with. - * @return The constructed object path. - */ - private String constructObjectPathWithPrefix(long... ids) { - StringBuilder pathBuilder = new StringBuilder(); - for (long id : ids) { - pathBuilder.append(OM_KEY_PREFIX).append(id); - } - return pathBuilder.toString(); - } - - /** - * Validates volume or bucket names according to specific rules. - * - * @param resName The name to validate (volume or bucket). - * @return A Response object if validation fails, or null if the name is valid. - */ - public Response validateNames(String resName) - throws IllegalArgumentException { - if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH || - resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) { - throw new IllegalArgumentException( - "Bucket or Volume name length should be between " + - OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH + " and " + - OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH); - } - - if (resName.charAt(0) == '.' || resName.charAt(0) == '-' || - resName.charAt(resName.length() - 1) == '.' || - resName.charAt(resName.length() - 1) == '-') { - throw new IllegalArgumentException( - "Bucket or Volume name cannot start or end with " + - "hyphen or period"); - } - - // Regex to check for lowercase letters, numbers, hyphens, underscores, and periods only. - if (!resName.matches("^[a-z0-9._-]+$")) { - throw new IllegalArgumentException( - "Bucket or Volume name can only contain lowercase " + - "letters, numbers, hyphens, underscores, and periods"); - } - - // If all checks pass, the name is valid - return null; - } - - /** - * Returns a response indicating that no keys matched the search prefix. - * - * @param startPrefix The search prefix that was used. - * @return The response indicating that no keys matched the search prefix. 
- */ - private Response noMatchedKeysResponse(String startPrefix) { - String jsonResponse = String.format( - "{\"message\": \"No keys matched the search prefix: '%s'.\"}", - startPrefix); - return Response.status(Response.Status.NOT_FOUND) - .entity(jsonResponse) - .type(MediaType.APPLICATION_JSON) - .build(); - } - - /** - * Utility method to create a bad request response with a custom message. - * Which means the request sent by the client to the server is incorrect - * or malformed and cannot be processed by the server. - * - * @param message The message to include in the response body. - * @return A Response object configured with the provided message. - */ - private Response createBadRequestResponse(String message) { - String jsonResponse = String.format("{\"message\": \"%s\"}", message); - return Response.status(Response.Status.BAD_REQUEST) - .entity(jsonResponse) - .type(MediaType.APPLICATION_JSON) - .build(); - } - - /** - * Utility method to create an internal server error response with a custom message. - * Which means the server encountered an unexpected condition that prevented it - * from fulfilling the request. - * - * @param message The message to include in the response body. - * @return A Response object configured with the provided message. 
- */ - private Response createInternalServerErrorResponse(String message) { - String jsonResponse = String.format("{\"message\": \"%s\"}", message); - return Response.status(Response.Status.INTERNAL_SERVER_ERROR) - .entity(jsonResponse) - .type(MediaType.APPLICATION_JSON) - .build(); - } - } From 55f0e9e998aba48d7a723e8de5f0164b544c0804 Mon Sep 17 00:00:00 2001 From: arafat Date: Sun, 19 May 2024 21:20:17 +0530 Subject: [PATCH 19/32] Fixed merge conflict --- .../main/java/org/apache/hadoop/ozone/recon/ReconUtils.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index cafbc0e83e79..f74b83b2ca00 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -65,6 +65,7 @@ import static org.jooq.impl.DSL.using; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -80,6 +81,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.xml.ws.Response; + /** * Recon Utility class. 
*/ From a29befccc41c0ad03e1b844d4aecf5576c427368 Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 21 May 2024 16:59:53 +0530 Subject: [PATCH 20/32] Added last key to response and limiting the search starting from Bucket level only --- .../recon/api/OMDBInsightSearchEndpoint.java | 46 ++++++++++++++----- .../api/TestOMDBInsightSearchEndpoint.java | 2 +- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index fb6061f2d8b4..d676ffc78870 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -90,16 +90,15 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts, * compiling a list of keys that match the given prefix along with their data sizes. *

- * The search prefix may range from the root level ('/') to any specific directory - * or key level (e.g., '/volA/' for everything under 'volA'). The search operation matches - * the prefix against the start of keys' names within the OM DB. + * The search prefix must start from the bucket level ('/volumeName/bucketName/') or any specific directory + * or key level (e.g., '/volA/bucketA/dir1' for everything under 'dir1' inside 'bucketA' of 'volA'). + * The search operation matches the prefix against the start of keys' names within the OM DB. *

* Example Usage: - * 1. A startPrefix of "/" will return all keys in the database. - * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'. - * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. + * 1. A startPrefix of "/volA/bucketA/" retrieves every key under bucket 'bucketA' in volume 'volA'. + * 2. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. * - * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path. + * @param startPrefix The prefix for searching keys, starting from the bucket level ('/volumeName/bucketName/') or any specific path. * @param limit Limits the number of returned keys. * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. * @throws IOException On failure to access the OM database or process the operation. @@ -111,14 +110,33 @@ public Response searchOpenKeys( String startPrefix, @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") int limit) throws IOException { + try { - limit = Math.max(0, limit); // Ensure limit is non-negative + // Ensure startPrefix is not null or empty and starts with '/' + if (startPrefix == null || startPrefix.length() == 0) { + return createBadRequestResponse( + "Invalid startPrefix: Path must be at the bucket level or deeper."); + } + startPrefix = startPrefix.startsWith("/") ? 
startPrefix : "/" + startPrefix; + + // Split the path to ensure it's at least at the bucket level + String[] pathComponents = startPrefix.split("/"); + if (pathComponents.length < 3 || pathComponents[2].isEmpty()) { + return createBadRequestResponse( + "Invalid startPrefix: Path must be at the bucket level or deeper."); + } + + // Ensure the limit is non-negative + limit = Math.max(0, limit); + + // Initialize response object KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse(); long replicatedTotal = 0; long unreplicatedTotal = 0; boolean keysFound = false; // Flag to track if any keys are found + String lastKey = null; - // Search keys from non-FSO layout. + // Search for non-fso keys in KeyTable Map obsKeys = new LinkedHashMap<>(); Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); @@ -131,9 +149,10 @@ public Response searchOpenKeys( .add(keyEntityInfo); // Add to non-FSO list replicatedTotal += entry.getValue().getReplicatedSize(); unreplicatedTotal += entry.getValue().getDataSize(); + lastKey = entry.getKey(); // Update lastKey } - // Search keys from FSO layout. 
+ // Search for fso keys in FileTable Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit); for (Map.Entry entry : fsoKeys.entrySet()) { keysFound = true; @@ -143,6 +162,7 @@ public Response searchOpenKeys( .add(keyEntityInfo); // Add to FSO list replicatedTotal += entry.getValue().getReplicatedSize(); unreplicatedTotal += entry.getValue().getDataSize(); + lastKey = entry.getKey(); // Update lastKey } // If no keys were found, return a response indicating that no keys matched @@ -153,12 +173,16 @@ public Response searchOpenKeys( // Set the aggregated totals in the response insightResponse.setReplicatedDataSize(replicatedTotal); insightResponse.setUnreplicatedDataSize(unreplicatedTotal); + insightResponse.setLastKey(lastKey); + // Return the response with the matched keys and their data sizes return Response.ok(insightResponse).build(); } catch (IOException e) { + // Handle IO exceptions and return an internal server error response return createInternalServerErrorResponse( "Error searching open keys in OM DB: " + e.getMessage()); } catch (IllegalArgumentException e) { + // Handle illegal argument exceptions and return a bad request response return createBadRequestResponse( "Invalid startPrefix: " + e.getMessage()); } @@ -174,7 +198,7 @@ public Map searchOpenKeysInFSO(String startPrefix, Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - // If names.length > 2, then the search prefix is at the volume or bucket level hence + // If names.length <= 2, then the search prefix is at the volume or bucket level hence // no need to find parent or extract id's or find subpaths as the openFileTable is // suitable for volume and bucket level search if (names.length > 2) { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 5b76ca34c8a9..e7d1e9ff1f6c 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -143,7 +143,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( @Test public void testVolumeLevelSearch() throws IOException { - String volumePath = "/vola"; + String volumePath = "/"; Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); assertEquals(200, response.getStatus()); From 4ba887fc49f64ffb0e53174f677a4c23582b481e Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 21 May 2024 18:02:42 +0530 Subject: [PATCH 21/32] Fixed checkstyle issues --- .../ozone/recon/ReconResponseUtils.java | 2 +- .../recon/api/OMDBInsightSearchEndpoint.java | 13 ++- .../api/TestOMDBInsightSearchEndpoint.java | 97 +++++++++++-------- 3 files changed, 64 insertions(+), 48 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java index a5dde93ffd53..41235ae54280 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconResponseUtils.java @@ -81,4 +81,4 @@ public static Response createInternalServerErrorResponse(String message) { .type(MediaType.APPLICATION_JSON) .build(); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index d676ffc78870..19ae7c21edf5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -21,12 +21,9 @@ 
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.recon.ReconResponseUtils; -import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; @@ -51,9 +48,11 @@ import java.util.ArrayList; import java.util.Set; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.ReconConstants.*; -import static org.apache.hadoop.ozone.recon.ReconResponseUtils.*; +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; +import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; import static org.apache.hadoop.ozone.recon.ReconUtils.constructObjectPathWithPrefix; import static org.apache.hadoop.ozone.recon.ReconUtils.validateNames; import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler; @@ -98,7 +97,7 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * 1. A startPrefix of "/volA/bucketA/" retrieves every key under bucket 'bucketA' in volume 'volA'. * 2. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'. 
* - * @param startPrefix The prefix for searching keys, starting from the bucket level ('/volumeName/bucketName/') or any specific path. + * @param startPrefix The prefix for searching keys, starting from the bucket level or any specific path. * @param limit Limits the number of returned keys. * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. * @throws IOException On failure to access the OM database or process the operation. diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index e7d1e9ff1f6c..305a2774e25c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -32,27 +32,22 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO; -import org.glassfish.jersey.internal.Errors; import org.junit.jupiter.api.BeforeEach; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.*; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import 
org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.platform.commons.logging.Logger; -import org.junit.platform.commons.logging.LoggerFactory; import javax.ws.rs.core.Response; @@ -60,11 +55,9 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.util.*; +import java.util.UUID; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.mockito.Mockito.when; - /** * Test for OMDBInsightSearchEndpoint. @@ -73,9 +66,6 @@ public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - - Logger LOG = LoggerFactory.getLogger(TestOMDBInsightSearchEndpoint.class); - private ReconOMMetadataManager reconOMMetadataManager; private OMDBInsightSearchEndpoint omdbInsightSearchEndpoint; private OzoneConfiguration ozoneConfiguration; @@ -142,36 +132,47 @@ private static OMMetadataManager initializeNewOmMetadataManager( } @Test - public void testVolumeLevelSearch() throws IOException { - String volumePath = "/"; - Response response = - omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); - assertEquals(200, response.getStatus()); - KeyInsightInfoResponse result = - (KeyInsightInfoResponse) response.getEntity(); - assertEquals(10, result.getFsoKeyInfoList().size()); - assertEquals(0, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(10000, result.getUnreplicatedDataSize()); - assertEquals(10000 * 3, result.getReplicatedDataSize()); + public void testVolumeLevelSearchRestriction() throws IOException { + // 
Test with volume level path + String volumePath = "/vola"; + Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); - // Test the same for volumeB. + // Test with another volume level path volumePath = "/volb"; response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); - assertEquals(200, response.getStatus()); - result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(0, result.getFsoKeyInfoList().size()); - assertEquals(5, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(5000, result.getUnreplicatedDataSize()); - assertEquals(5000 * 3, result.getReplicatedDataSize()); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); } + @Test + public void testRootLevelSearchRestriction() throws IOException { + // Test with root level path + String rootPath = "/"; + Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + + // Test with root level path without trailing slash + rootPath = ""; + response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), 
response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the path must be at the bucket level or deeper"); + } @Test public void testBucketLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1",20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 20); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -232,18 +233,18 @@ public void testDirectoryLevelSearch() throws IOException { @Test public void testLimitSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola", 5); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(5, result.getFsoKeyInfoList().size()); + assertEquals(2, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); } @Test public void testSearchOpenKeysWithNoMatchFound() throws IOException { // Given a search prefix that matches no keys - String searchPrefix = "incorrectprefix"; + String searchPrefix = "unknown-volume/unknown-bucket/"; Response response = omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, 10); @@ -268,9 +269,8 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { response.getStatus(), "Expected a 400 BAD REQUEST status"); String entity = (String) response.getEntity(); - assertTrue(entity.contains("Invalid startPrefix: Bucket or Volume name can " + - "only contain lowercase letters, numbers, hyphens, underscores, and periods"), - "Expected a message indicating Invalid startPrefix"); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating the 
path must be at the bucket level or deeper"); } @@ -376,6 +376,23 @@ private void populateOMDB() throws Exception { dira6ObjectId, bucketa2ObjectId, volaObjectId, 1000); } + @Test + public void testLastKeyInResponse() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(5, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(5000, result.getUnreplicatedDataSize()); + assertEquals(5000 * 3, result.getReplicatedDataSize()); + // Assert Last Key + assertEquals(ROOT_PATH + "volb/bucketb1/fileb5", result.getLastKey(), + "Expected last key to be 'fileb5'"); + } + /** * Create a volume and add it to the Volume Table. * From 0999c2a08bf631c230127733b536e9b1bde7d756 Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 21 May 2024 21:46:03 +0530 Subject: [PATCH 22/32] Added pagination logic --- .../hadoop/ozone/recon/ReconConstants.java | 1 + .../recon/api/OMDBInsightSearchEndpoint.java | 52 +++++---- .../api/TestOMDBInsightSearchEndpoint.java | 101 ++++++++++++------ 3 files changed, 100 insertions(+), 54 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 359eb1b6fd5c..08c703711961 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -51,6 +51,7 @@ private ReconConstants() { public static final String RECON_OPEN_KEY_INCLUDE_NON_FSO = "includeNonFso"; public static final String RECON_OPEN_KEY_INCLUDE_FSO = "includeFso"; public static final String RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT = "1000"; + public static final String 
RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY = ""; public static final String RECON_QUERY_FILTER = "missingIn"; public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; public static final String PREV_DELETED_BLOCKS_TRANSACTION_ID_DEFAULT_VALUE = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 19ae7c21edf5..fba5f5294923 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY; import static org.apache.hadoop.ozone.recon.ReconResponseUtils.noMatchedKeysResponse; import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createBadRequestResponse; import static org.apache.hadoop.ozone.recon.ReconResponseUtils.createInternalServerErrorResponse; @@ -99,6 +100,7 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * * @param startPrefix The prefix for searching keys, starting from the bucket level or any specific path. * @param limit Limits the number of returned keys. + * @param prevKey The key to start after for the next set of records. * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. * @throws IOException On failure to access the OM database or process the operation. 
*/ @@ -108,7 +110,8 @@ public Response searchOpenKeys( @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix") String startPrefix, @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit") - int limit) throws IOException { + int limit, + @DefaultValue(RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY) @QueryParam("prevKey") String prevKey) throws IOException { try { // Ensure startPrefix is not null or empty and starts with '/' @@ -139,7 +142,7 @@ public Response searchOpenKeys( Map obsKeys = new LinkedHashMap<>(); Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); - obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit); + obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit, prevKey); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -152,7 +155,7 @@ public Response searchOpenKeys( } // Search for fso keys in FileTable - Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit); + Map fsoKeys = searchOpenKeysInFSO(startPrefix, limit, prevKey); for (Map.Entry entry : fsoKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -188,7 +191,7 @@ public Response searchOpenKeys( } public Map searchOpenKeysInFSO(String startPrefix, - int limit) + int limit, String prevKey) throws IOException, IllegalArgumentException { Map matchedKeys = new LinkedHashMap<>(); // Convert the search prefix to an object path for FSO buckets @@ -220,9 +223,8 @@ public Map searchOpenKeysInFSO(String startPrefix, // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { - matchedKeys.putAll( - retrieveKeysFromTable(openFileTable, subPath, - limit - matchedKeys.size())); + matchedKeys.putAll(retrieveKeysFromTable(openFileTable, subPath, + limit - matchedKeys.size(), prevKey)); if (matchedKeys.size() >= limit) { break; } @@ -231,7 +233,7 @@ public Map searchOpenKeysInFSO(String startPrefix, } // Iterate over for bucket and volume level search - 
matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit)); + matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); return matchedKeys; } @@ -239,7 +241,7 @@ public Map searchOpenKeysInFSO(String startPrefix, * Finds all subdirectories under a parent directory in an FSO bucket. It builds * a list of paths for these subdirectories. These sub-directories are then used * to search for open files in the openFileTable. - *

+ * * How it works: * - Starts from a parent directory identified by parentId. * - Looks through all child directories of this parent. @@ -256,8 +258,7 @@ public Map searchOpenKeysInFSO(String startPrefix, private void gatherSubPaths(long parentId, List subPaths, String[] names) throws IOException { // Fetch the NSSummary object for parentId - NSSummary parentSummary = - reconNamespaceSummaryManager.getNSSummary(parentId); + NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (parentSummary == null) { return; } @@ -266,11 +267,9 @@ private void gatherSubPaths(long parentId, List subPaths, Set childDirIds = parentSummary.getChildDir(); for (Long childId : childDirIds) { // Fetch the NSSummary for each child directory - NSSummary childSummary = - reconNamespaceSummaryManager.getNSSummary(childId); + NSSummary childSummary = reconNamespaceSummaryManager.getNSSummary(childId); if (childSummary != null) { - String subPath = - constructObjectPathWithPrefix(volumeID, bucketID, childId); + String subPath = constructObjectPathWithPrefix(volumeID, bucketID, childId); // Add to subPaths subPaths.add(subPath); // Recurse into this child directory @@ -334,16 +333,25 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException, Ille * @param table The table to retrieve keys from. * @param startPrefix The search prefix to match keys against. * @param limit The maximum number of keys to retrieve. + * @param prevKey The key to start after for the next set of records. * @return A map of keys and their corresponding OmKeyInfo objects. * @throws IOException If there are problems accessing the table. 
*/ private Map retrieveKeysFromTable( - Table table, String startPrefix, int limit) + Table table, String startPrefix, int limit, String prevKey) throws IOException { Map matchedKeys = new LinkedHashMap<>(); - try ( - TableIterator> keyIter = table.iterator()) { - keyIter.seek(startPrefix); + try (TableIterator> keyIter = table.iterator()) { + // If a previous key is provided, seek to the previous key and skip it. + if (!prevKey.isEmpty()) { + keyIter.seek(prevKey); + if (keyIter.hasNext() && keyIter.next().getKey().equals(prevKey)) { + // Skip the previous key + } + } else { + // If no previous key is provided, start from the search prefix. + keyIter.seek(startPrefix); + } while (keyIter.hasNext() && matchedKeys.size() < limit) { Table.KeyValue entry = keyIter.next(); String dbKey = entry.getKey(); @@ -353,8 +361,7 @@ private Map retrieveKeysFromTable( matchedKeys.put(dbKey, entry.getValue()); } } catch (IOException exception) { - LOG.error("Error retrieving keys from table for path: {}", startPrefix, - exception); + LOG.error("Error retrieving keys from table for path: {}", startPrefix, exception); throw exception; } return matchedKeys; @@ -371,8 +378,7 @@ private KeyEntityInfo createKeyEntityInfoFromOmKeyInfo(String dbKey, OmKeyInfo keyInfo) { KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); keyEntityInfo.setKey(dbKey); // Set the DB key - keyEntityInfo.setPath( - keyInfo.getKeyName()); // Assuming path is the same as key name + keyEntityInfo.setPath(keyInfo.getKeyName()); // Assuming path is the same as key name keyEntityInfo.setInStateSince(keyInfo.getCreationTime()); keyEntityInfo.setSize(keyInfo.getDataSize()); keyEntityInfo.setReplicatedSize(keyInfo.getReplicatedSize()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 305a2774e25c..5030edb1ebcf 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -36,9 +36,11 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.mock; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; @@ -135,7 +137,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( public void testVolumeLevelSearchRestriction() throws IOException { // Test with volume level path String volumePath = "/vola"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); + Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -143,7 +145,7 @@ public void testVolumeLevelSearchRestriction() throws IOException { // Test with another volume level path volumePath = "/volb"; - response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20); + response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 
20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -154,7 +156,7 @@ public void testVolumeLevelSearchRestriction() throws IOException { public void testRootLevelSearchRestriction() throws IOException { // Test with root level path String rootPath = "/"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20); + Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -162,7 +164,7 @@ public void testRootLevelSearchRestriction() throws IOException { // Test with root level path without trailing slash rootPath = ""; - response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20); + response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -172,7 +174,7 @@ public void testRootLevelSearchRestriction() throws IOException { @Test public void testBucketLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 20, ""); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -183,7 +185,7 @@ public void testBucketLevelSearch() throws IOException { assertEquals(5000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20); + 
omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -197,7 +199,7 @@ public void testBucketLevelSearch() throws IOException { @Test public void testDirectoryLevelSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1", 20, ""); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -208,7 +210,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2", 20, ""); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -219,7 +221,7 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(1000 * 3, result.getReplicatedDataSize()); response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20, ""); assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); @@ -233,7 +235,7 @@ public void testDirectoryLevelSearch() throws IOException { @Test public void testLimitSearch() throws IOException { Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2); + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2, ""); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); @@ -247,7 +249,7 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { String searchPrefix = "unknown-volume/unknown-bucket/"; Response response = - 
omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, 10); + omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, 10, ""); // Then the response should indicate that no keys were found assertEquals(Response.Status.NOT_FOUND.getStatusCode(), @@ -262,7 +264,7 @@ public void testSearchOpenKeysWithNoMatchFound() throws IOException { public void testSearchOpenKeysWithBadRequest() throws IOException { // Give a negative limit int negativeLimit = -1; - Response response = omdbInsightSearchEndpoint.searchOpenKeys("@323232", negativeLimit); + Response response = omdbInsightSearchEndpoint.searchOpenKeys("@323232", negativeLimit, ""); // Then the response should indicate that the request was bad assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), @@ -273,6 +275,60 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { "Expected a message indicating the path must be at the bucket level or deeper"); } + @Test + public void testLastKeyInResponse() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + (KeyInsightInfoResponse) response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(5, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(5000, result.getUnreplicatedDataSize()); + assertEquals(5000 * 3, result.getReplicatedDataSize()); + // Assert Last Key + assertEquals(ROOT_PATH + "volb/bucketb1/fileb5", result.getLastKey(), + "Expected last key to be 'fileb5'"); + } + + @Test + public void testSearchOpenKeysWithPagination() throws IOException { + // Set the initial parameters + String startPrefix = "/volb/bucketb1"; + int limit = 2; + String prevKey = ""; + + // Perform the first search request + Response response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = 
(KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getNonFSOKeyInfoList().size()); + assertEquals(0, result.getFsoKeyInfoList().size()); + + // Extract the last key from the response + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + // Perform the second search request using the last key + response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getNonFSOKeyInfoList().size()); + assertEquals(0, result.getFsoKeyInfoList().size()); + + // Extract the last key from the response + prevKey = result.getLastKey(); + assertNotNull(prevKey, "Last key should not be null"); + + // Perform the third search request using the last key + response = omdbInsightSearchEndpoint.searchOpenKeys(startPrefix, limit, prevKey); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getNonFSOKeyInfoList().size()); + assertEquals(0, result.getFsoKeyInfoList().size()); + } + /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. 
@@ -376,23 +432,6 @@ private void populateOMDB() throws Exception { dira6ObjectId, bucketa2ObjectId, volaObjectId, 1000); } - @Test - public void testLastKeyInResponse() throws IOException { - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20); - assertEquals(200, response.getStatus()); - KeyInsightInfoResponse result = - (KeyInsightInfoResponse) response.getEntity(); - assertEquals(0, result.getFsoKeyInfoList().size()); - assertEquals(5, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(5000, result.getUnreplicatedDataSize()); - assertEquals(5000 * 3, result.getReplicatedDataSize()); - // Assert Last Key - assertEquals(ROOT_PATH + "volb/bucketb1/fileb5", result.getLastKey(), - "Expected last key to be 'fileb5'"); - } - /** * Create a volume and add it to the Volume Table. * From da9c4683893e0d687152c11b81e845206437649c Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 22 May 2024 01:28:43 +0530 Subject: [PATCH 23/32] Fixed bugs and checkstyle --- .../ozone/recon/api/OMDBInsightSearchEndpoint.java | 7 ++++--- .../recon/api/TestOMDBInsightSearchEndpoint.java | 11 +++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index fba5f5294923..8d411faf1ba9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -139,10 +139,10 @@ public Response searchOpenKeys( String lastKey = null; // Search for non-fso keys in KeyTable - Map obsKeys = new LinkedHashMap<>(); Table openKeyTable = omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY); - obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit, prevKey); + Map obsKeys = + 
retrieveKeysFromTable(openKeyTable, startPrefix, limit, prevKey); for (Map.Entry entry : obsKeys.entrySet()) { keysFound = true; KeyEntityInfo keyEntityInfo = @@ -345,8 +345,9 @@ private Map retrieveKeysFromTable( // If a previous key is provided, seek to the previous key and skip it. if (!prevKey.isEmpty()) { keyIter.seek(prevKey); - if (keyIter.hasNext() && keyIter.next().getKey().equals(prevKey)) { + if (keyIter.hasNext()) { // Skip the previous key + keyIter.next(); } } else { // If no previous key is provided, start from the search prefix. diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 5030edb1ebcf..040738583427 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -40,7 +40,9 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD; -import static org.junit.jupiter.api.Assertions.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.mockito.Mockito.mock; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; @@ -85,8 +87,6 @@ public void setUp() throws Exception { omMetadataManager = initializeNewOmMetadataManager( Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir")) .toFile()); - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - getMockOzoneManagerServiceProviderWithFSO(); 
reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile()); @@ -361,7 +361,7 @@ public void testSearchOpenKeysWithPagination() throws IOException { private void populateOMDB() throws Exception { // Create Volumes long volaObjectId = createVolume("vola"); - long volbObjectId = createVolume("volb"); + createVolume("volb"); // Create Buckets in vola long bucketa1ObjectId = @@ -372,8 +372,7 @@ private void populateOMDB() throws Exception { getFSOBucketLayout()); // Create Bucket in volb - long bucketb1ObjectId = - createBucket("volb", "bucketb1", 1000 + 1000 + 1000 + 1000 + 1000, + createBucket("volb", "bucketb1", 1000 + 1000 + 1000 + 1000 + 1000, getOBSBucketLayout()); // Create Directories and Files under bucketa1 From 8b984a2ff7f6d22639d619ff5b2dea8c6b42b204 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 22 May 2024 11:14:39 +0530 Subject: [PATCH 24/32] Fixed checkstyle issues --- .../hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 040738583427..2982242d0791 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -35,7 +35,6 @@ import org.junit.jupiter.api.BeforeEach; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; From 2a57d5da1cf7f6488d8b37d6b46ef46131c6c578 Mon Sep 17 00:00:00 2001 From: arafat Date: Mon, 27 May 2024 20:10:17 +0530 Subject: [PATCH 25/32] Made review comments on excpeitons thrown --- .../recon/api/OMDBInsightSearchEndpoint.java | 10 +++-- .../api/TestOMDBInsightSearchEndpoint.java | 43 +++++++++++++++++++ 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 8d411faf1ba9..4c9e5dfb29ab 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -290,7 +290,7 @@ private void gatherSubPaths(long parentId, List subPaths, * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. * @throws IOException If database access fails. 
*/ - public String convertToObjectPath(String prevKeyPrefix) throws IOException, IllegalArgumentException { + public String convertToObjectPath(String prevKeyPrefix) throws IOException { try { String[] names = parseRequestPath(normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); @@ -314,7 +314,7 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException, Ille String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().getSkipCache(bucketKey); long bucketId = bucketInfo.getObjectID(); - if (names.length == 2) { + if (names.length == 2 || bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { return constructObjectPathWithPrefix(volumeId, bucketId); } @@ -322,7 +322,11 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException, Ille BucketHandler handler = getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); long dirObjectId = handler.getDirInfo(names).getObjectID(); return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId); - } catch (NullPointerException e) { + } catch (IllegalArgumentException e) { + LOG.error("IllegalArgumentException encountered while converting key prefix to object path: {}", prevKeyPrefix, e); + throw e; + } catch (RuntimeException e) { + LOG.error("RuntimeException encountered while converting key prefix to object path: {}", prevKeyPrefix, e); return prevKeyPrefix; } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 2982242d0791..f530669aa74e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -326,6 +326,49 @@ 
public void testSearchOpenKeysWithPagination() throws IOException { result = (KeyInsightInfoResponse) response.getEntity(); assertEquals(1, result.getNonFSOKeyInfoList().size()); assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(result.getNonFSOKeyInfoList().get(0).getKey(), result.getLastKey(), + "Expected last key to be empty"); + } + + @Test + public void testKeyLevelSearch() throws IOException { + // FSO Bucket key-level search + Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea1", 1, ""); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); + + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea2", 1, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); + + // OBS Bucket key-level search + response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb1", 1, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(1, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); + + response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb2", 1, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) 
response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(1, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); } From 124773f3382dbe0191d0884c8eeec8c9226f42b2 Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 28 May 2024 10:53:12 +0530 Subject: [PATCH 26/32] Added changes to accomodate key level search --- .../recon/api/OMDBInsightSearchEndpoint.java | 74 +++++++-- .../api/TestOMDBInsightSearchEndpoint.java | 152 ++++++++++-------- 2 files changed, 141 insertions(+), 85 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 4c9e5dfb29ab..830974bb88e4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -48,6 +48,7 @@ import java.util.ArrayList; import java.util.Set; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OPEN_KEY_SEARCH_DEFAULT_PREV_KEY; @@ -203,13 +204,12 @@ public Map searchOpenKeysInFSO(String startPrefix, // If names.length <= 2, then the search prefix is at the volume or bucket level hence // no need to find parent or extract id's or find subpaths as the openFileTable is // suitable for volume and bucket level search - if (names.length > 2) { + if (names.length > 2 && startPrefixObjectPath.endsWith(OM_KEY_PREFIX)) { // Fetch the parent ID to search for long parentId = Long.parseLong(names[names.length - 1]); 
// Fetch the nameSpaceSummary for the parent ID - NSSummary parentSummary = - reconNamespaceSummaryManager.getNSSummary(parentId); + NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); if (parentSummary == null) { return matchedKeys; } @@ -223,8 +223,7 @@ public Map searchOpenKeysInFSO(String startPrefix, // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { - matchedKeys.putAll(retrieveKeysFromTable(openFileTable, subPath, - limit - matchedKeys.size(), prevKey)); + matchedKeys.putAll(retrieveKeysFromTable(openFileTable, subPath, limit - matchedKeys.size(), prevKey)); if (matchedKeys.size() >= limit) { break; } @@ -232,7 +231,7 @@ public Map searchOpenKeysInFSO(String startPrefix, return matchedKeys; } - // Iterate over for bucket and volume level search + // If the search level is at the volume, bucket or key level, directly search the openFileTable matchedKeys.putAll(retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit, prevKey)); return matchedKeys; } @@ -294,12 +293,12 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { try { String[] names = parseRequestPath(normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); - // Root-Level :- Return the original path + // Root-Level: Return the original path if (names.length == 0) { return prevKeyPrefix; } - // Volume-Level :- Fetch the volumeID + // Volume-Level: Fetch the volumeID String volumeName = names[0]; validateNames(volumeName); String volumeKey = omMetadataManager.getVolumeKey(volumeName); @@ -308,7 +307,7 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { return constructObjectPathWithPrefix(volumeId); } - // Bucket-Level :- Fetch the bucketID + // Bucket-Level: Fetch the bucketID String bucketName = names[1]; validateNames(bucketName); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); @@ -318,11 +317,25 @@ public String 
convertToObjectPath(String prevKeyPrefix) throws IOException { return constructObjectPathWithPrefix(volumeId, bucketId); } - // Fetch the immediate parentID which could be a directory or the bucket itself - BucketHandler handler = getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - long dirObjectId = handler.getDirInfo(names).getObjectID(); - return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId); - } catch (IllegalArgumentException e) { + // Check if the last element is a key + if (isKey(volumeId, bucketId, names[names.length - 1])) { + // The last element is a key hence return the path with the key prefix + return constructObjectPathWithPrefix(volumeId, bucketId, bucketId) + OM_KEY_PREFIX + names[names.length - 1]; + } + + // Attempt to fetch the immediate parentID which could be a directory or the bucket itself + BucketHandler handler = + getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); + long dirObjectId; + try { + dirObjectId = handler.getDirInfo(names).getObjectID(); + } catch (NullPointerException e) { + // If fetching directory also then it's neither a key nor a directory hence unknown entity. + return constructObjectPathWithPrefix(volumeId, bucketId, bucketId) + OM_KEY_PREFIX + names[names.length - 1]; + } + // If it's a directory, return the path with the directory prefix with a trailing '/' indicating a directory + return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId) + OM_KEY_PREFIX; + } catch (IllegalArgumentException e) { LOG.error("IllegalArgumentException encountered while converting key prefix to object path: {}", prevKeyPrefix, e); throw e; } catch (RuntimeException e) { @@ -331,6 +344,39 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { } } + + /** + * Checks if the last element in the path is a key. + * + * @param volumeId The volume ID. + * @param bucketId The bucket ID. 
+ * @param potentialKeyName The name of the potential key. + * @return True if the last element is a key, false otherwise. + * @throws IOException If database access fails. + */ + private boolean isKey(long volumeId, long bucketId, String potentialKeyName) + throws IOException { + String keyPrefixObjectPath = + constructObjectPathWithPrefix(volumeId, bucketId, bucketId); + + Table openFileTable = + omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + try (TableIterator> keyIter = openFileTable.iterator()) { + String searchKey = keyPrefixObjectPath + OM_KEY_PREFIX + potentialKeyName; + keyIter.seek(searchKey); + if (keyIter.hasNext()) { + Table.KeyValue keyValue = keyIter.next(); + return keyValue.getValue().getFileName().equals(potentialKeyName); + } + return false; + } catch (IOException e) { + LOG.error( + "IOException encountered while checking if the last element is a key: {}", + potentialKeyName, e); + throw e; + } + } + /** * Common method to retrieve keys from a table based on a search prefix and a limit. 
* diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index f530669aa74e..8b437a6d6d87 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -133,18 +133,18 @@ private static OMMetadataManager initializeNewOmMetadataManager( } @Test - public void testVolumeLevelSearchRestriction() throws IOException { - // Test with volume level path - String volumePath = "/vola"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); + public void testRootLevelSearchRestriction() throws IOException { + // Test with root level path + String rootPath = "/"; + Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); - // Test with another volume level path - volumePath = "/volb"; - response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); + // Test with root level path without trailing slash + rootPath = ""; + response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -152,18 +152,18 @@ public void testVolumeLevelSearchRestriction() throws IOException { } @Test - public void testRootLevelSearchRestriction() throws IOException { - // Test with root level path - 
String rootPath = "/"; - Response response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); + public void testVolumeLevelSearchRestriction() throws IOException { + // Test with volume level path + String volumePath = "/vola"; + Response response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); - // Test with root level path without trailing slash - rootPath = ""; - response = omdbInsightSearchEndpoint.searchOpenKeys(rootPath, 20, ""); + // Test with another volume level path + volumePath = "/volb"; + response = omdbInsightSearchEndpoint.searchOpenKeys(volumePath, 20, ""); assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), @@ -193,6 +193,13 @@ public void testBucketLevelSearch() throws IOException { // Assert Total Size assertEquals(5000, result.getUnreplicatedDataSize()); assertEquals(5000 * 3, result.getReplicatedDataSize()); + + // Test with bucket that does not exist + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/nonexistentbucket", 20, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); } @Test @@ -229,34 +236,78 @@ public void testDirectoryLevelSearch() throws IOException { // Assert Total Size assertEquals(1000, result.getUnreplicatedDataSize()); assertEquals(1000 * 3, result.getReplicatedDataSize()); + + // Test with non-existent directory + response = 
omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentdir", 20, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); } @Test - public void testLimitSearch() throws IOException { - Response response = - omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2, ""); + public void testKeyLevelSearch() throws IOException { + // FSO Bucket key-level search + Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea1", 10, ""); assertEquals(200, response.getStatus()); - KeyInsightInfoResponse result = - (KeyInsightInfoResponse) response.getEntity(); - assertEquals(2, result.getFsoKeyInfoList().size()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); - } + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); - @Test - public void testSearchOpenKeysWithNoMatchFound() throws IOException { - // Given a search prefix that matches no keys - String searchPrefix = "unknown-volume/unknown-bucket/"; + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea2", 10, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); - Response response = - omdbInsightSearchEndpoint.searchOpenKeys(searchPrefix, 10, ""); + // OBS Bucket key-level search + response = 
omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb1", 10, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(1, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); - // Then the response should indicate that no keys were found - assertEquals(Response.Status.NOT_FOUND.getStatusCode(), - response.getStatus(), "Expected a 404 NOT FOUND status"); + response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb2", 10, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(0, result.getFsoKeyInfoList().size()); + assertEquals(1, result.getNonFSOKeyInfoList().size()); + // Assert Total Size + assertEquals(1000, result.getUnreplicatedDataSize()); + assertEquals(1000 * 3, result.getReplicatedDataSize()); + // Test with non-existent key + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentfile", 1, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); String entity = (String) response.getEntity(); assertTrue(entity.contains("No keys matched the search prefix"), "Expected a message indicating no keys were found"); + + response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/nonexistentfile", 1, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + @Test + public void testLimitSearch() throws IOException { + Response response = + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 2, ""); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = + 
(KeyInsightInfoResponse) response.getEntity(); + assertEquals(2, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); } @Test @@ -330,47 +381,6 @@ public void testSearchOpenKeysWithPagination() throws IOException { "Expected last key to be empty"); } - @Test - public void testKeyLevelSearch() throws IOException { - // FSO Bucket key-level search - Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea1", 1, ""); - assertEquals(200, response.getStatus()); - KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(1, result.getFsoKeyInfoList().size()); - assertEquals(0, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(1000, result.getUnreplicatedDataSize()); - assertEquals(1000 * 3, result.getReplicatedDataSize()); - - response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/filea2", 1, ""); - assertEquals(200, response.getStatus()); - result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(1, result.getFsoKeyInfoList().size()); - assertEquals(0, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(1000, result.getUnreplicatedDataSize()); - assertEquals(1000 * 3, result.getReplicatedDataSize()); - - // OBS Bucket key-level search - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb1", 1, ""); - assertEquals(200, response.getStatus()); - result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(0, result.getFsoKeyInfoList().size()); - assertEquals(1, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(1000, result.getUnreplicatedDataSize()); - assertEquals(1000 * 3, result.getReplicatedDataSize()); - - response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1/fileb2", 1, ""); - assertEquals(200, response.getStatus()); - result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(0, 
result.getFsoKeyInfoList().size()); - assertEquals(1, result.getNonFSOKeyInfoList().size()); - // Assert Total Size - assertEquals(1000, result.getUnreplicatedDataSize()); - assertEquals(1000 * 3, result.getReplicatedDataSize()); - } - /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. From 55d0f8910bb5a1d3b737a94de08682f44238471f Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 30 May 2024 04:04:18 +0530 Subject: [PATCH 27/32] Added more tests to the search endpoint and fixes issues with key level search --- .../apache/hadoop/ozone/recon/ReconUtils.java | 53 ++++- .../ozone/recon/api/OMDBInsightEndpoint.java | 46 +--- .../recon/api/OMDBInsightSearchEndpoint.java | 127 +++------- .../api/TestOMDBInsightSearchEndpoint.java | 222 ++++++++++++++++-- 4 files changed, 292 insertions(+), 156 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index fe920b7098b5..eb6a6b35dd72 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -32,10 +32,7 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.TimeZone; +import java.util.*; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -79,6 +76,7 @@ import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.apache.hadoop.security.authentication.client.AuthenticationException; import 
org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -569,6 +567,53 @@ public static long convertToEpochMillis(String dateString, String dateFormat, Ti } } + /** + * Finds all subdirectories under a parent directory in an FSO bucket. It builds + * a list of paths for these subdirectories. These sub-directories are then used + * to search for open files in the openFileTable. + * + * How it works: + * - Starts from a parent directory identified by parentId. + * - Looks through all child directories of this parent. + * - For each child, it creates a path that starts with volumeID/bucketID/parentId, + * following our openFileTable format + * - Adds these paths to a list and explores each child further for more subdirectories. + * + * @param parentId The ID of the directory we start exploring from. + * @param subPaths A list where we collect paths to all subdirectories. + * @param names An array with at least two elements: the first is volumeID and + * the second is bucketID. These are used to start each path. + * @throws IOException If there are problems accessing directory information. 
+ */ + public static void gatherSubPaths(long parentId, List subPaths, + long volumeID, long bucketID, + ReconNamespaceSummaryManager reconNamespaceSummaryManager) + throws IOException { + // Fetch the NSSummary object for parentId + NSSummary parentSummary = + reconNamespaceSummaryManager.getNSSummary(parentId); + if (parentSummary == null) { + return; + } + + Set childDirIds = parentSummary.getChildDir(); + for (Long childId : childDirIds) { + // Fetch the NSSummary for each child directory + NSSummary childSummary = + reconNamespaceSummaryManager.getNSSummary(childId); + if (childSummary != null) { + String subPath = + ReconUtils.constructObjectPathWithPrefix(volumeID, bucketID, + childId); + // Add to subPaths + subPaths.add(subPath); + // Recurse into this child directory + gatherSubPaths(childId, subPaths, volumeID, bucketID, + reconNamespaceSummaryManager); + } + } + } + /** * Validates volume or bucket names according to specific rules. * diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index f4aaf50dfc57..129328441c2a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -1061,7 +1061,8 @@ public Map searchKeysInFSO(ParamInfo paramInfo) subPaths.add(startPrefixObjectPath); // Recursively gather all subpaths - gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1])); + ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), + Long.parseLong(names[1]), reconNamespaceSummaryManager); // Iterate over the subpaths and retrieve the files for (String subPath : subPaths) { paramInfo.setStartPrefix(subPath); @@ -1082,49 +1083,6 @@ public Map searchKeysInFSO(ParamInfo paramInfo) return matchedKeys; } - /** - * Finds all 
subdirectories under a parent directory in an FSO bucket. It builds - * a list of paths for these subdirectories. These sub-directories are then used - * to search for files in the fileTable. - *

- * How it works: - * - Starts from a parent directory identified by parentId. - * - Looks through all child directories of this parent. - * - For each child, it creates a path that starts with volumeID/bucketID/parentId, - * following our fileTable format - * - Adds these paths to a list and explores each child further for more subdirectories. - * - * @param parentId The ID of the directory we start exploring from. - * @param subPaths A list where we collect paths to all subdirectories. - * @param volumeID - * @param bucketID - * @throws IOException If there are problems accessing directory information. - */ - private void gatherSubPaths(long parentId, List subPaths, - long volumeID, long bucketID) throws IOException { - // Fetch the NSSummary object for parentId - NSSummary parentSummary = - reconNamespaceSummaryManager.getNSSummary(parentId); - if (parentSummary == null) { - return; - } - - Set childDirIds = parentSummary.getChildDir(); - for (Long childId : childDirIds) { - // Fetch the NSSummary for each child directory - NSSummary childSummary = - reconNamespaceSummaryManager.getNSSummary(childId); - if (childSummary != null) { - String subPath = - ReconUtils.constructObjectPathWithPrefix(volumeID, bucketID, childId); - // Add to subPaths - subPaths.add(subPath); - // Recurse into this child directory - gatherSubPaths(childId, subPaths, volumeID, bucketID); - } - } - } - /** * Converts a startPrefix path into an objectId path for FSO buckets, using IDs. 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 830974bb88e4..4c8229bc0946 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -22,8 +22,10 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; @@ -46,7 +48,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.ArrayList; -import java.util.Set; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_START_PREFIX; @@ -219,7 +220,8 @@ public Map searchOpenKeysInFSO(String startPrefix, subPaths.add(startPrefixObjectPath); // Recursively gather all subpaths - gatherSubPaths(parentId, subPaths, names); + ReconUtils.gatherSubPaths(parentId, subPaths, Long.parseLong(names[0]), Long.parseLong(names[1]), + reconNamespaceSummaryManager); // Iterate over the subpaths and retrieve the open files for (String subPath : subPaths) { @@ -236,48 +238,6 @@ public Map searchOpenKeysInFSO(String startPrefix, return matchedKeys; } - /** - * Finds all subdirectories under a parent directory in an FSO bucket. It builds - * a list of paths for these subdirectories. 
These sub-directories are then used - * to search for open files in the openFileTable. - * - * How it works: - * - Starts from a parent directory identified by parentId. - * - Looks through all child directories of this parent. - * - For each child, it creates a path that starts with volumeID/bucketID/parentId, - * following our openFileTable format - * - Adds these paths to a list and explores each child further for more subdirectories. - * - * @param parentId The ID of the directory we start exploring from. - * @param subPaths A list where we collect paths to all subdirectories. - * @param names An array with at least two elements: the first is volumeID and - * the second is bucketID. These are used to start each path. - * @throws IOException If there are problems accessing directory information. - */ - private void gatherSubPaths(long parentId, List subPaths, - String[] names) throws IOException { - // Fetch the NSSummary object for parentId - NSSummary parentSummary = reconNamespaceSummaryManager.getNSSummary(parentId); - if (parentSummary == null) { - return; - } - long volumeID = Long.parseLong(names[0]); - long bucketID = Long.parseLong(names[1]); - Set childDirIds = parentSummary.getChildDir(); - for (Long childId : childDirIds) { - // Fetch the NSSummary for each child directory - NSSummary childSummary = reconNamespaceSummaryManager.getNSSummary(childId); - if (childSummary != null) { - String subPath = constructObjectPathWithPrefix(volumeID, bucketID, childId); - // Add to subPaths - subPaths.add(subPath); - // Recurse into this child directory - gatherSubPaths(childId, subPaths, names); - } - } - } - - /** * Converts a key prefix into an object path for FSO buckets, using IDs. *

@@ -292,6 +252,7 @@ private void gatherSubPaths(long parentId, List subPaths, public String convertToObjectPath(String prevKeyPrefix) throws IOException { try { String[] names = parseRequestPath(normalizePath(prevKeyPrefix, BucketLayout.FILE_SYSTEM_OPTIMIZED)); + Table openFileTable = omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); // Root-Level: Return the original path if (names.length == 0) { @@ -317,66 +278,46 @@ public String convertToObjectPath(String prevKeyPrefix) throws IOException { return constructObjectPathWithPrefix(volumeId, bucketId); } - // Check if the last element is a key - if (isKey(volumeId, bucketId, names[names.length - 1])) { - // The last element is a key hence return the path with the key prefix - return constructObjectPathWithPrefix(volumeId, bucketId, bucketId) + OM_KEY_PREFIX + names[names.length - 1]; - } - - // Attempt to fetch the immediate parentID which could be a directory or the bucket itself + // Directory or Key-Level: Check both key and directory BucketHandler handler = getBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - long dirObjectId; - try { - dirObjectId = handler.getDirInfo(names).getObjectID(); - } catch (NullPointerException e) { - // If fetching directory also then it's neither a key nor a directory hence unknown entity. 
- return constructObjectPathWithPrefix(volumeId, bucketId, bucketId) + OM_KEY_PREFIX + names[names.length - 1]; - } - // If it's a directory, return the path with the directory prefix with a trailing '/' indicating a directory - return constructObjectPathWithPrefix(volumeId, bucketId, dirObjectId) + OM_KEY_PREFIX; - } catch (IllegalArgumentException e) { - LOG.error("IllegalArgumentException encountered while converting key prefix to object path: {}", prevKeyPrefix, e); - throw e; - } catch (RuntimeException e) { - LOG.error("RuntimeException encountered while converting key prefix to object path: {}", prevKeyPrefix, e); - return prevKeyPrefix; - } - } + if (names.length >= 3) { + String lastEntiry = names[names.length - 1]; - /** - * Checks if the last element in the path is a key. - * - * @param volumeId The volume ID. - * @param bucketId The bucket ID. - * @param potentialKeyName The name of the potential key. - * @return True if the last element is a key, false otherwise. - * @throws IOException If database access fails. 
- */ - private boolean isKey(long volumeId, long bucketId, String potentialKeyName) - throws IOException { - String keyPrefixObjectPath = - constructObjectPathWithPrefix(volumeId, bucketId, bucketId); + // Check if the directory exists + OmDirectoryInfo dirInfo = handler.getDirInfo(names); + if (dirInfo != null && dirInfo.getName().equals(lastEntiry)) { + return constructObjectPathWithPrefix(volumeId, bucketId, dirInfo.getObjectID()) + OM_KEY_PREFIX; + } - Table openFileTable = - omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); - try (TableIterator> keyIter = openFileTable.iterator()) { - String searchKey = keyPrefixObjectPath + OM_KEY_PREFIX + potentialKeyName; - keyIter.seek(searchKey); - if (keyIter.hasNext()) { - Table.KeyValue keyValue = keyIter.next(); - return keyValue.getValue().getFileName().equals(potentialKeyName); + // Check if the key exists + long dirID = handler.getDirObjectId(names, names.length); + String keyKey = constructObjectPathWithPrefix(volumeId, bucketId, dirID) + + OM_KEY_PREFIX + lastEntiry; + OmKeyInfo keyInfo = openFileTable.getSkipCache(keyKey); + if (keyInfo != null && keyInfo.getFileName().equals(lastEntiry)) { + return constructObjectPathWithPrefix(volumeId, bucketId, + keyInfo.getParentObjectID()) + OM_KEY_PREFIX + lastEntiry; + } + + return prevKeyPrefix; } - return false; - } catch (IOException e) { + } catch (IllegalArgumentException e) { LOG.error( - "IOException encountered while checking if the last element is a key: {}", - potentialKeyName, e); + "IllegalArgumentException encountered while converting key prefix to object path: {}", + prevKeyPrefix, e); throw e; + } catch (RuntimeException e) { + LOG.error( + "RuntimeException encountered while converting key prefix to object path: {}", + prevKeyPrefix, e); + return prevKeyPrefix; } + return prevKeyPrefix; } + /** * Common method to retrieve keys from a table based on a search prefix and a limit. 
* diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 8b437a6d6d87..2c5d027301d1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -63,7 +63,23 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; /** - * Test for OMDBInsightSearchEndpoint. + * Test class for OMDBInsightSearchEndpoint. + * + * This class tests various scenarios for searching open keys within a + * given volume, bucket, and directory structure. The tests include: + * + * 1. Test Root Level Search Restriction: Ensures searching at the root level returns a bad request. + * 2. Test Volume Level Search Restriction: Ensures searching at the volume level returns a bad request. + * 3. Test Bucket Level Search: Verifies search results within different types of buckets (FSO, OBS, Legacy). + * 4. Test Directory Level Search: Validates searching inside specific directories. + * 5. Test Key Level Search: Confirms search results for specific keys within buckets. + * 6. Test Key Level Search Under Directory: Verifies searching for keys within nested directories. + * 7. Test Search Under Nested Directory: Checks search results within nested directories under dira3. + * 8. Test Limit Search: Tests the limit functionality of the search API. + * 9. Test Search Open Keys with Bad Request: Ensures bad requests with invalid parameters return appropriate responses. + * 10. Test Last Key in Response: Confirms the presence of the last key in paginated responses. + * 11. Test Search Open Keys with Pagination: Verifies paginated search results. + * 12. Test Search in Empty Bucket: Checks the response for searching within an empty bucket. 
*/ public class TestOMDBInsightSearchEndpoint extends AbstractReconSqlDBTest { @@ -172,17 +188,19 @@ public void testVolumeLevelSearchRestriction() throws IOException { @Test public void testBucketLevelSearch() throws IOException { + // Search inside FSO bucket Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1", 20, ""); assertEquals(200, response.getStatus()); KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(5, result.getFsoKeyInfoList().size()); + assertEquals(14, result.getFsoKeyInfoList().size()); assertEquals(0, result.getNonFSOKeyInfoList().size()); // Assert Total Size - assertEquals(5000, result.getUnreplicatedDataSize()); - assertEquals(5000 * 3, result.getReplicatedDataSize()); + assertEquals(14000, result.getUnreplicatedDataSize()); + assertEquals(14000 * 3, result.getReplicatedDataSize()); + // Search inside OBS bucket response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb1", 20, ""); assertEquals(200, response.getStatus()); @@ -194,6 +212,13 @@ public void testBucketLevelSearch() throws IOException { assertEquals(5000, result.getUnreplicatedDataSize()); assertEquals(5000 * 3, result.getReplicatedDataSize()); + // Search Inside LEGACY bucket + response = + omdbInsightSearchEndpoint.searchOpenKeys("/volc/bucketc1", 20, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(7, result.getNonFSOKeyInfoList().size()); + // Test with bucket that does not exist response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/nonexistentbucket", 20, ""); assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); @@ -231,11 +256,11 @@ public void testDirectoryLevelSearch() throws IOException { assertEquals(200, response.getStatus()); result = (KeyInsightInfoResponse) response.getEntity(); - assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(10, result.getFsoKeyInfoList().size()); 
assertEquals(0, result.getNonFSOKeyInfoList().size()); // Assert Total Size - assertEquals(1000, result.getUnreplicatedDataSize()); - assertEquals(1000 * 3, result.getReplicatedDataSize()); + assertEquals(10000, result.getUnreplicatedDataSize()); + assertEquals(10000 * 3, result.getReplicatedDataSize()); // Test with non-existent directory response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/nonexistentdir", 20, ""); @@ -299,6 +324,97 @@ public void testKeyLevelSearch() throws IOException { "Expected a message indicating no keys were found"); } + // Test searching for keys under a directory + @Test + public void testKeyLevelSearchUnderDirectory() throws IOException { + // FSO Bucket key-level search + Response response = + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1/innerfile", 10, ""); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + response = + omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2/innerfile", 10, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + // Test for unknown file in fso bucket + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira1/unknownfile", 10, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + + // Test for unknown file in fso bucket + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira2/unknownfile", 10, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + 
entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + + + @Test + public void testSearchUnderNestedDirectory() throws IOException { + Response response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3", 20, + ""); + assertEquals(200, response.getStatus()); + KeyInsightInfoResponse result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(10, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + // Search under dira31 + response = omdbInsightSearchEndpoint.searchOpenKeys("/vola/bucketa1/dira3/dira31", + 20, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(6, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + // Search under dira32 + response = omdbInsightSearchEndpoint.searchOpenKeys( + "/vola/bucketa1/dira3/dira31/dira32", 20, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(3, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + // Search under dira33 + response = omdbInsightSearchEndpoint.searchOpenKeys( + "/vola/bucketa1/dira3/dira31/dira32/dira33", 20, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + // Search for the exact file under dira33 + response = omdbInsightSearchEndpoint.searchOpenKeys( + "/vola/bucketa1/dira3/dira31/dira32/dira33/file33_1", 20, ""); + assertEquals(200, response.getStatus()); + result = (KeyInsightInfoResponse) response.getEntity(); + assertEquals(1, result.getFsoKeyInfoList().size()); + assertEquals(0, result.getNonFSOKeyInfoList().size()); + + // Search 
for a non existant file under each nested directory + response = omdbInsightSearchEndpoint.searchOpenKeys( + "/vola/bucketa1/dira3/dira31/dira32/dira33/nonexistentfile", 20, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + + response = omdbInsightSearchEndpoint.searchOpenKeys( + "/vola/bucketa1/dira3/dira31/dira32/nonexistentfile", 20, ""); + assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } + @Test public void testLimitSearch() throws IOException { Response response = @@ -381,6 +497,15 @@ public void testSearchOpenKeysWithPagination() throws IOException { "Expected last key to be empty"); } + @Test + public void testSearchInEmptyBucket() throws IOException { + // Search in empty bucket bucketb2 + Response response = omdbInsightSearchEndpoint.searchOpenKeys("/volb/bucketb2", 20, ""); + assertEquals(404, response.getStatus()); + String entity = (String) response.getEntity(); + assertTrue(entity.contains("No keys matched the search prefix"), + "Expected a message indicating no keys were found"); + } /** * Tests the NSSummaryEndpoint for a given volume, bucket, and directory structure. 
@@ -394,6 +519,9 @@ public void testSearchOpenKeysWithPagination() throws IOException { * │ │ ├── dira1 (Total Size: 1000KB) * │ │ ├── dira2 (Total Size: 1000KB) * │ │ └── dira3 (Total Size: 1000KB) + * │ │ ├── dira31 (Total Size: 1000KB) + * │ │ ├── dira32 (Total Size: 1000KB) + * │ │ └── dira33 (Total Size: 1000KB) * │ ├── bucketa2 (FSO) Total Size: 5000KB * │ │ ├── filea3 (Size: 1000KB) * │ │ ├── filea4 (Size: 1000KB) @@ -401,12 +529,20 @@ public void testSearchOpenKeysWithPagination() throws IOException { * │ │ ├── dira5 (Total Size: 1000KB) * │ │ └── dira6 (Total Size: 1000KB) * └── volb (Total Size: 5000KB) - * └── bucketb1 (OBS) Total Size: 5000KB - * ├── fileb1 (Size: 1000KB) - * ├── fileb2 (Size: 1000KB) - * ├── fileb3 (Size: 1000KB) - * ├── fileb4 (Size: 1000KB) - * └── fileb5 (Size: 1000KB) + * ├── bucketb1 (OBS) Total Size: 5000KB + * │ ├── fileb1 (Size: 1000KB) + * │ ├── fileb2 (Size: 1000KB) + * │ ├── fileb3 (Size: 1000KB) + * │ ├── fileb4 (Size: 1000KB) + * │ └── fileb5 (Size: 1000KB) + * └── bucketb2 (OBS) Total Size: 0KB (Empty Bucket) + * └── volc (Total Size: 7000KB) + * └── bucketc1 (LEGACY) Total Size: 7000KB + * ├── filec1 (Size: 1000KB) + * ├── filec2 (Size: 1000KB) + * ├── filec3 (Size: 1000KB) + * ├── dirc1/ (Total Size: 2000KB) + * └── dirc2/ (Total Size: 2000KB) * * @throws Exception */ @@ -414,6 +550,7 @@ private void populateOMDB() throws Exception { // Create Volumes long volaObjectId = createVolume("vola"); createVolume("volb"); + createVolume("volc"); // Create Buckets in vola long bucketa1ObjectId = @@ -426,6 +563,11 @@ private void populateOMDB() throws Exception { // Create Bucket in volb createBucket("volb", "bucketb1", 1000 + 1000 + 1000 + 1000 + 1000, getOBSBucketLayout()); + createBucket("volb", "bucketb2", 0, getOBSBucketLayout()); // Empty Bucket + + // Create Bucket in volc + createBucket("volc", "bucketc1", 7000, + getLegacyBucketLayout()); // Create Directories and Files under bucketa1 long dira1ObjectId = @@ -438,12 
+580,51 @@ private void populateOMDB() throws Exception { createDirectory(bucketa1ObjectId, bucketa1ObjectId, volaObjectId, "dira3"); + // Create nested directories under dira3 + long dira31ObjectId = + createDirectory(dira3ObjectId, bucketa1ObjectId, volaObjectId, + "dira31"); + long dira32ObjectId = + createDirectory(dira31ObjectId, bucketa1ObjectId, volaObjectId, + "dira32"); + long dira33ObjectId = + createDirectory(dira32ObjectId, bucketa1ObjectId, volaObjectId, + "dira33"); + // Files directly under bucketa1 createOpenFile("filea1", "bucketa1", "vola", "filea1", bucketa1ObjectId, bucketa1ObjectId, volaObjectId, 1000); createOpenFile("filea2", "bucketa1", "vola", "filea2", bucketa1ObjectId, bucketa1ObjectId, volaObjectId, 1000); + // Files under dira3 + createOpenFile("dira3/file3_1", "bucketa1", "vola", "file3_1", + dira3ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/file3_2", "bucketa1", "vola", "file3_2", + dira3ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/file3_3", "bucketa1", "vola", "file3_3", + dira3ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/file3_4", "bucketa1", "vola", "file3_4", + dira3ObjectId, bucketa1ObjectId, volaObjectId, 1000); + + // Files under dira31 + createOpenFile("dira3/dira31/file31_1", "bucketa1", "vola", "file31_1", + dira31ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/dira31/file31_2", "bucketa1", "vola", "file31_2", + dira31ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/dira31/file31_3", "bucketa1", "vola", "file31_3", + dira31ObjectId, bucketa1ObjectId, volaObjectId, 1000); + + // Files under dira32 + createOpenFile("dira3/dira31/dira32/file32_1", "bucketa1", "vola", "file32_1", + dira32ObjectId, bucketa1ObjectId, volaObjectId, 1000); + createOpenFile("dira3/dira31/dira32/file32_2", "bucketa1", "vola", "file32_2", + dira32ObjectId, bucketa1ObjectId, volaObjectId, 1000); + + // Files under dira33 + 
createOpenFile("dira3/dira31/dira32/dira33/file33_1", "bucketa1", "vola", "file33_1", + dira33ObjectId, bucketa1ObjectId, volaObjectId, 1000); + // Create Directories and Files under bucketa2 long dira4ObjectId = createDirectory(bucketa2ObjectId, bucketa2ObjectId, volaObjectId, @@ -473,14 +654,21 @@ private void populateOMDB() throws Exception { dira1ObjectId, bucketa1ObjectId, volaObjectId, 1000); createOpenFile("dira2/innerfile", "bucketa1", "vola", "innerfile", dira2ObjectId, bucketa1ObjectId, volaObjectId, 1000); - createOpenFile("dira3/innerfile", "bucketa1", "vola", "innerfile", - dira3ObjectId, bucketa1ObjectId, volaObjectId, 1000); createOpenFile("dira4/innerfile", "bucketa2", "vola", "innerfile", dira4ObjectId, bucketa2ObjectId, volaObjectId, 1000); createOpenFile("dira5/innerfile", "bucketa2", "vola", "innerfile", dira5ObjectId, bucketa2ObjectId, volaObjectId, 1000); createOpenFile("dira6/innerfile", "bucketa2", "vola", "innerfile", dira6ObjectId, bucketa2ObjectId, volaObjectId, 1000); + + // Create Keys and Directories in bucketc1 (LEGACY layout) + createOpenKey("filec1", "bucketc1", "volc", 1000); + createOpenKey("filec2", "bucketc1", "volc", 1000); + createOpenKey("filec3", "bucketc1", "volc", 1000); + createOpenKey("dirc1/", "bucketc1", "volc", 2000); // Directory indicated by trailing slash + createOpenKey("dirc2/", "bucketc1", "volc", 2000); // Directory indicated by trailing slash + createOpenKey("dirc1/innerfile", "bucketc1", "volc", 2000); // File in directory + createOpenKey("dirc2/innerfile", "bucketc1", "volc", 2000); // File in directory } /** @@ -595,4 +783,8 @@ private static BucketLayout getOBSBucketLayout() { return BucketLayout.OBJECT_STORE; } + private static BucketLayout getLegacyBucketLayout() { + return BucketLayout.LEGACY; + } + } From ab4e22c1496caed2e0770306480d8ff9715bb4f8 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 30 May 2024 12:02:54 +0530 Subject: [PATCH 28/32] Fixed checkstyle issues --- 
.../java/org/apache/hadoop/ozone/recon/ReconUtils.java | 7 +++++-- .../apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 1 - 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index eb6a6b35dd72..905894206cab 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -32,7 +32,11 @@ import java.text.ParseException; import java.text.SimpleDateFormat; import java.time.Instant; -import java.util.*; +import java.util.List; +import java.util.TimeZone; +import java.util.Date; +import java.util.Set; +import java.util.ArrayList; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -76,7 +80,6 @@ import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue; import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; -import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 129328441c2a..3f95c04fc916 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -61,7 +61,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; 
-import java.util.Set; import java.util.TimeZone; import java.util.function.Predicate; import java.util.stream.Collectors; From 2149e09ea57879815dbdef660f009579583dc084 Mon Sep 17 00:00:00 2001 From: arafat Date: Thu, 30 May 2024 19:41:06 +0530 Subject: [PATCH 29/32] Fixed javadoc and added one more test case --- .../ozone/recon/api/OMDBInsightSearchEndpoint.java | 10 ++++++++-- .../ozone/recon/api/TestOMDBInsightSearchEndpoint.java | 6 ++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 4c8229bc0946..29b33020d8af 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -240,12 +240,18 @@ public Map searchOpenKeysInFSO(String startPrefix, /** * Converts a key prefix into an object path for FSO buckets, using IDs. - *

+ * * This method transforms a user-provided path (e.g., "volume/bucket/dir1") into * a database-friendly format ("/volumeID/bucketID/ParentId/") by replacing names * with their corresponding IDs. It simplifies database queries for FSO bucket operations. * - * @param prevKeyPrefix The path to be converted, not including key or directory names/IDs. + * Examples: + * - Input: "volume/bucket/key" -> Output: "/volumeID/bucketID/parentDirID/key" + * - Input: "volume/bucket/dir1" -> Output: "/volumeID/bucketID/dir1ID/" + * - Input: "volume/bucket/dir1/key1" -> Output: "/volumeID/bucketID/dir1ID/key1" + * - Input: "volume/bucket/dir1/dir2" -> Output: "/volumeID/bucketID/dir2ID/" + * + * @param prevKeyPrefix The path to be converted. * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. * @throws IOException If database access fails. */ diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java index 2c5d027301d1..ab16f349af27 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOMDBInsightSearchEndpoint.java @@ -439,6 +439,12 @@ public void testSearchOpenKeysWithBadRequest() throws IOException { String entity = (String) response.getEntity(); assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), "Expected a message indicating the path must be at the bucket level or deeper"); + + response = omdbInsightSearchEndpoint.searchOpenKeys("///", 20, ""); + assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus()); + entity = (String) response.getEntity(); + assertTrue(entity.contains("Invalid startPrefix: Path must be at the bucket level or deeper"), + "Expected a message indicating 
the path must be at the bucket level or deeper"); } @Test From e6ff273bf2eef7ab25fd8e975f9466e8908e39cd Mon Sep 17 00:00:00 2001 From: arafat Date: Tue, 16 Jul 2024 11:33:26 +0530 Subject: [PATCH 30/32] Improved Java doc --- .mvn/.develocity/develocity-workspace-id | 1 + .../hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 .mvn/.develocity/develocity-workspace-id diff --git a/.mvn/.develocity/develocity-workspace-id b/.mvn/.develocity/develocity-workspace-id new file mode 100644 index 000000000000..dd011f134701 --- /dev/null +++ b/.mvn/.develocity/develocity-workspace-id @@ -0,0 +1 @@ +cgilmul745hetovwivw7cx5zna \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java index 29b33020d8af..9cd6fa33d032 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java @@ -105,6 +105,7 @@ public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM, * @param prevKey The key to start after for the next set of records. * @return A KeyInsightInfoResponse, containing matching keys and their data sizes. * @throws IOException On failure to access the OM database or process the operation. + * @throws IllegalArgumentException If the provided startPrefix or other arguments are invalid. */ @GET @Path("/open/search") @@ -254,6 +255,7 @@ public Map searchOpenKeysInFSO(String startPrefix, * @param prevKeyPrefix The path to be converted. * @return The object path as "/volumeID/bucketID/ParentId/" or an empty string if an error occurs. * @throws IOException If database access fails. + * @throws IllegalArgumentException If the provided path is invalid or cannot be converted. 
*/ public String convertToObjectPath(String prevKeyPrefix) throws IOException { try { From 4a09ffcef9ef4758957ca4de10f4a2139880e639 Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 17 Jul 2024 19:21:22 +0530 Subject: [PATCH 31/32] Made changes to javadoc --- .../org/apache/hadoop/ozone/recon/ReconUtils.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 905894206cab..425121675ef7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -579,14 +579,15 @@ public static long convertToEpochMillis(String dateString, String dateFormat, Ti * - Starts from a parent directory identified by parentId. * - Looks through all child directories of this parent. * - For each child, it creates a path that starts with volumeID/bucketID/parentId, - * following our openFileTable format + * following our openFileTable format. * - Adds these paths to a list and explores each child further for more subdirectories. * - * @param parentId The ID of the directory we start exploring from. - * @param subPaths A list where we collect paths to all subdirectories. - * @param names An array with at least two elements: the first is volumeID and - * the second is bucketID. These are used to start each path. - * @throws IOException If there are problems accessing directory information. + * @param parentId The ID of the parent directory from which to start gathering subdirectories. + * @param subPaths The list to which the paths of subdirectories will be added. + * @param volumeID The ID of the volume containing the parent directory. + * @param bucketID The ID of the bucket containing the parent directory. 
+ * @param reconNamespaceSummaryManager The manager used to retrieve NSSummary objects. + * @throws IOException If an I/O error occurs while fetching NSSummary objects. */ public static void gatherSubPaths(long parentId, List subPaths, long volumeID, long bucketID, From ec272c4c8445f3270a991463691985f52d030afe Mon Sep 17 00:00:00 2001 From: arafat Date: Wed, 17 Jul 2024 19:25:38 +0530 Subject: [PATCH 32/32] Removed unwanted file --- .mvn/.develocity/develocity-workspace-id | 1 - 1 file changed, 1 deletion(-) delete mode 100644 .mvn/.develocity/develocity-workspace-id diff --git a/.mvn/.develocity/develocity-workspace-id b/.mvn/.develocity/develocity-workspace-id deleted file mode 100644 index dd011f134701..000000000000 --- a/.mvn/.develocity/develocity-workspace-id +++ /dev/null @@ -1 +0,0 @@ -cgilmul745hetovwivw7cx5zna \ No newline at end of file