From d6f966635e55027cfd3ff3439ec0dd3a277ab438 Mon Sep 17 00:00:00 2001
From: Rishabh Patel
Date: Fri, 21 Mar 2025 10:37:00 -0700
Subject: [PATCH 1/4] add integration test for checksums

---
 .../hadoop/ozone/debug/TestReplicasCli.java   | 367 ++++++++++++++++++
 1 file changed, 367 insertions(+)
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java
new file mode 100644
index 000000000000..7f264dca3245
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Collections.singletonMap;
+import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
+import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.ClientConfigForTesting;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.audit.AuditLogTestUtils;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.debug.replicas.ReplicasDebug;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ozone.test.OzoneTestBase;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.io.TempDir;
+import picocli.CommandLine;
+
+@TestMethodOrder(MethodOrderer.MethodName.class)
+public class TestReplicasCli extends OzoneTestBase {
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneClient ozClient = null;
+  private static ObjectStore store = null;
+  private static OzoneManager ozoneManager;
+  private static StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient;
+  private static MessageDigest eTagProvider;
+  private static Set<OzoneClient> ozoneClients = new HashSet<>();
+  private static GenericTestUtils.PrintStreamCapturer output;
+  @TempDir
+  private File tempDir;
+  private StringWriter stdout, stderr;
+  private PrintWriter pstdout, pstderr;
+  private CommandLine cmd;
+
+  @BeforeAll
+  public static void initialize() throws Exception {
+    eTagProvider = MessageDigest.getInstance(MD5_HASH);
+    AuditLogTestUtils.enableAuditLog();
+    output = GenericTestUtils.captureOut();
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS, OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
+    startCluster(conf);
+  }
+
+  @BeforeEach
+  public void setup() throws IOException {
+    stdout = new StringWriter();
+    pstdout = new PrintWriter(stdout);
+    stderr = new StringWriter();
+    pstderr = new PrintWriter(stderr);
+
+    cmd = new CommandLine(new ReplicasDebug())
+        .setOut(pstdout)
+        .setErr(pstderr);
+  }
+
+  @AfterEach
+  public void shutdown() throws IOException {
+    pstderr.close();
+    stderr.close();
+    pstdout.close();
+    stdout.close();
+  }
+
+  @AfterAll
+  public static void teardown() throws IOException {
+    shutdownCluster();
+  }
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * @param conf Configurations to start the cluster.
+   */
+  static void startCluster(OzoneConfiguration conf) throws Exception {
+    startCluster(conf, MiniOzoneCluster.newBuilder(conf));
+  }
+
+  static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder builder) throws Exception {
+    // Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop
+    // for testZReadKeyWithUnhealthyContainerReplica.
+    conf.set("ozone.scm.stale.node.interval", "1s");
+    conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
+
+    ClientConfigForTesting.newBuilder(StorageUnit.MB)
+        .setDataStreamMinPacketSize(1)
+        .applyTo(conf);
+
+    cluster = builder
+        .setNumDatanodes(14)
+        .build();
+    cluster.waitForClusterToBeReady();
+    ozClient = OzoneClientFactory.getRpcClient(conf);
+    ozoneClients.add(ozClient);
+    store = ozClient.getObjectStore();
+    storageContainerLocationClient =
+        cluster.getStorageContainerLocationClient();
+    ozoneManager = cluster.getOzoneManager();
+  }
+
+  /**
+   * Close OzoneClient and shutdown MiniOzoneCluster.
+   */
+  static void shutdownCluster() {
+    org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(ozoneClients);
+    ozoneClients.clear();
+    org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(output);
+
+    if (storageContainerLocationClient != null) {
+      storageContainerLocationClient.close();
+    }
+
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  private OzoneKeyDetails createTestKey(OzoneBucket bucket) throws IOException {
+    return createTestKey(bucket, getTestName(), UUID.randomUUID().toString());
+  }
+
+  private OzoneKeyDetails createTestKey(
+      OzoneBucket bucket, String keyName, String keyValue
+  ) throws IOException {
+    return createTestKey(bucket, keyName, keyValue.getBytes(UTF_8));
+  }
+
+  private OzoneKeyDetails createTestKey(
+      OzoneBucket bucket, String keyName, byte[] bytes
+  ) throws IOException {
+    RatisReplicationConfig replication = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE);
+    Map<String, String> metadata = singletonMap("key", RandomStringUtils.randomAscii(10));
+    try (OzoneOutputStream out = bucket.createKey(keyName, bytes.length, replication, metadata)) {
+      out.write(bytes);
+    }
+    OzoneKeyDetails key = bucket.getKey(keyName);
+    assertNotNull(key);
+    assertEquals(keyName, key.getName());
+    return key;
+  }
+
+  private String generateKeys()
+      throws IOException {
+    String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5);
+    String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5);
+    String bucketB = "buc-b-" + RandomStringUtils.randomNumeric(5);
+    store.createVolume(volumeA);
+    OzoneVolume volA = store.getVolume(volumeA);
+    volA.createBucket(bucketA);
+    volA.createBucket(bucketB);
+    OzoneBucket volAbucketA = volA.getBucket(bucketA);
+    OzoneBucket volAbucketB = volA.getBucket(bucketB);
+
+    /*
+    Create 10 keys with prefix key-a- in each of
+    vol-a-/buc-a- and vol-a-/buc-b-
+    */
+    String keyBaseA = "key-a-";
+    for (int i = 0; i < 10; i++) {
+      byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
+      OzoneOutputStream one = volAbucketA.createKey(
+          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      one.write(value);
+      one.close();
+      OzoneOutputStream two = volAbucketB.createKey(
+          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      two.write(value);
+      two.close();
+    }
+
+    /*
+    Create 10 keys with prefix level1/key-b- in each of
+    vol-a-/buc-a- and vol-a-/buc-b-
+    */
+    String keyBaseB = "level1/key-b-";
+    for (int i = 0; i < 10; i++) {
+      byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
+      OzoneOutputStream one = volAbucketA.createKey(
+          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      one.write(value);
+      one.close();
+      OzoneOutputStream two = volAbucketB.createKey(
+          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      two.write(value);
+      two.close();
+    }
+
+    Iterator<? extends OzoneKey> volABucketAIter = volAbucketA.listKeys("key-");
+    int volABucketAKeyCount = 0;
+    while (volABucketAIter.hasNext()) {
+      volABucketAIter.next();
+      volABucketAKeyCount++;
+    }
+    assertEquals(10, volABucketAKeyCount);
+    Iterator<? extends OzoneKey> volABucketBIter = volAbucketB.listKeys("key-");
+    int volABucketBKeyCount = 0;
+    while (volABucketBIter.hasNext()) {
+      volABucketBIter.next();
+      volABucketBKeyCount++;
+    }
+    assertEquals(10, volABucketBKeyCount);
+
+    Iterator<? extends OzoneKey> volABucketAKeyAIter = volAbucketA.listKeys("key-a-");
+    int volABucketAKeyACount = 0;
+    while (volABucketAKeyAIter.hasNext()) {
+      volABucketAKeyAIter.next();
+      volABucketAKeyACount++;
+    }
+    assertEquals(10, volABucketAKeyACount);
+    Iterator<? extends OzoneKey> volABucketAKeyBIter = volAbucketA.listKeys("level1");
+    // skip the "level1/" directory entry returned first
+    volABucketAKeyBIter.next();
+    for (int i = 0; i < 10; i++) {
+      OzoneKey key = volABucketAKeyBIter.next();
+      assertTrue(key.getName().startsWith("level1/key-b-" + i + "-"));
+    }
+    assertFalse(volABucketBIter.hasNext());
+
+    return "/" + volumeA + "/" + bucketA;
+  }
+
+  @Test
+  public void testReplicas() throws Exception {
+    String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5);
+    String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5);
+    String bucketB = "buc-b-" + RandomStringUtils.randomNumeric(5);
+    store.createVolume(volumeA);
+    OzoneVolume volA = store.getVolume(volumeA);
+    volA.createBucket(bucketA);
+    volA.createBucket(bucketB);
+    OzoneBucket volAbucketA = volA.getBucket(bucketA);
+    OzoneBucket volAbucketB = volA.getBucket(bucketB);
+
+    /*
+    Create 10 keys with prefix key-a- in each of
+    vol-a-/buc-a- and vol-a-/buc-b-
+    */
+    String keyBaseA = "key-a-";
+    for (int i = 0; i < 10; i++) {
+      byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
+      OzoneOutputStream one = volAbucketA.createKey(
+          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      one.write(value);
+      one.close();
+      OzoneOutputStream two = volAbucketB.createKey(
+          keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      two.write(value);
+      two.close();
+    }
+
+    /*
+    Create 10 keys with prefix level1/key-b- in each of
+    vol-a-/buc-a- and vol-a-/buc-b-
+    */
+    String keyBaseB = "level1/key-b-";
+    for (int i = 0; i < 10; i++) {
+      byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8);
+      OzoneOutputStream one = volAbucketA.createKey(
+          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      one.write(value);
+      one.close();
+      OzoneOutputStream two = volAbucketB.createKey(
+          keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5),
+          value.length, RATIS, ONE,
+          new HashMap<>());
+      two.write(value);
+      two.close();
+    }
+
+    String path = "/";
+    // Prepare scan args
+    List<String> completeScanArgs = new ArrayList<>();
+    completeScanArgs.addAll(Arrays.asList(
+        "verify",
+        "--checksums",
+        "--output-dir", tempDir.getAbsolutePath(),
+        path
+    ));
+
+    int exitCode = cmd.execute(completeScanArgs.toArray(new String[0]));
+    assertEquals(0, exitCode, stderr.toString());
+    System.out.println(stdout);
+  }
+}
+
+//TODO: org.apache.hadoop.ozone.dn.scanner.TestContainerScannerIntegrationAbstract.ContainerCorruptions.corruptFile
+// see this class to corrupt the blocks and containers and see what type of exceptions are thrown
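The TODO at the end of PATCH 1/4 points at TestContainerScannerIntegrationAbstract.ContainerCorruptions.corruptFile as the model for provoking read failures. As a rough, hypothetical sketch only (not part of the patch, and the block-file path handling in a real container layout differs), the core of such a helper can be plain JDK I/O that flips one byte so a later checksum-verified read of that replica fails:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Path;

final class BlockCorruptionSketch {
  private BlockCorruptionSketch() {
  }

  /** Flips one byte in the middle of the given block file (illustrative only). */
  static void corruptFile(Path blockFile) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile(blockFile.toFile(), "rw")) {
      if (raf.length() == 0) {
        return; // nothing to corrupt
      }
      long mid = raf.length() / 2; // mid-file, so the first chunk still reads cleanly
      raf.seek(mid);
      int b = raf.read();
      raf.seek(mid);
      raf.write(b ^ 0xFF); // inverted byte => checksum mismatch on the next verified read
    }
  }
}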
From 99a9212841b280409b06767c93c3fc28f043e6fb Mon Sep 17 00:00:00 2001
From: Rishabh Patel
Date: Mon, 24 Mar 2025 22:50:14 -0700
Subject: [PATCH 2/4] HDDS-12594. Optimize replica checksum verifier

---
 .../debug/ozone-debug-corrupt-block.robot     |  5 +-
 .../debug/ozone-debug-dead-datanode.robot     |  9 +-
 .../debug/ozone-debug-stale-datanode.robot    |  4 +-
 .../debug/ozone-debug-tests-ec3-2.robot       | 31 ++-----
 .../debug/ozone-debug-tests-ec6-3.robot       | 36 ++------
 .../smoketest/debug/ozone-debug-tests.robot   |  9 +-
 .../main/smoketest/debug/ozone-debug.robot    |  6 --
 .../ozone/debug/replicas/Checksums.java       | 85 ++-----------------
 8 files changed, 27 insertions(+), 158 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-corrupt-block.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-corrupt-block.robot
index 20689b7c0f5e..ba17ca9f085a 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-corrupt-block.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-corrupt-block.robot
@@ -32,7 +32,7 @@ Test ozone debug checksums with corrupt block replica
     Set Test Variable    ${DIR}    ${directory}
 
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    7
+    Should Be Equal As Integers    ${count_files}    1
 
     ${json} =    Read Replicas Manifest
     ${md5sum} =    Execute    md5sum ${TEMP_DIR}/${TESTFILE} | awk '{print $1}'
@@ -41,9 +41,6 @@
         ${datanode} =    Set Variable    ${json}[blocks][0][replicas][${replica}][hostname]
         IF    '${datanode}' == '${CORRUPT_DATANODE}'
-            Verify Corrupt Replica    ${json}    ${replica}    ${md5sum}
             Should Contain    ${json}[blocks][0][replicas][${replica}][exception]    Checksum mismatch
-        ELSE
-            Verify Healthy Replica    ${json}    ${replica}    ${md5sum}
         END
     END
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-dead-datanode.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-dead-datanode.robot
index 42ae5dec7e92..33a2b78c3df0 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-dead-datanode.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-dead-datanode.robot
@@ -31,11 +31,4 @@ Test ozone debug checksums with one datanode DEAD
     Set Test Variable    ${DIR}    ${directory}
 
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    5
-
-    ${json} =    Read Replicas Manifest
-    ${md5sum} =    Execute    md5sum ${TEMP_DIR}/${TESTFILE} | awk '{print $1}'
-
-    FOR    ${replica}    IN RANGE    2
-        Verify Healthy Replica    ${json}    ${replica}    ${md5sum}
-    END
+    Should Be Equal As Integers    ${count_files}    1
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-stale-datanode.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-stale-datanode.robot
index 36cef5e66518..69448f21e6b9 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-stale-datanode.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-stale-datanode.robot
@@ -32,7 +32,7 @@ Test ozone debug checksums with one datanode STALE
     Set Test Variable    ${DIR}    ${directory}
 
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    7
+    Should Be Equal As Integers    ${count_files}    1
 
     ${json} =    Read Replicas Manifest
     ${md5sum} =    Execute    md5sum ${TEMP_DIR}/${TESTFILE} | awk '{print $1}'
@@ -42,7 +42,5 @@
         IF    '${datanode}' == '${STALE_DATANODE}'
             Verify Stale Replica    ${json}    ${replica}
-        ELSE
-            Verify Healthy Replica    ${json}    ${replica}    ${md5sum}
         END
     END
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot
index 57227458cc15..c578ac76cc29 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec3-2.robot
@@ -52,52 +52,35 @@ Create EC key
     Create EC key    1048576    1
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    6
-    ${sum_size} =    Evaluate    1048576 * 3
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 2 data blocks
     Create EC key    1048576    2
     ${directory} =    Execute replicas verify checksums CLI tool
-    ${sum_size} =    Evaluate    1048576 * 4
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    6
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 3 data blocks
     Create EC key    1048576    3
    ${directory} =    Execute replicas verify checksums CLI tool
-    ${sum_size} =    Evaluate    1048576 * 5
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    6
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
-
+    Should Be Equal As Integers    ${count_files}    1
+
 3 data blocks and partial stripe
     Create EC key    1000000    4
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 5
     ${sum_size_last_stripe} =    Evaluate    ((1000000 * 4) % 1048576) * 3
-    Should Be Equal As Integers    ${count_files}    11
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
-    Verify Healthy EC Replica    ${directory}    2    ${sum_size_last_stripe}
+    Should Be Equal As Integers    ${count_files}    1
 
 4 data blocks and partial stripe
     Create EC key    1000000    5
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 5
-    ${sum_size_last_stripe} =    Evaluate    1048576 * 3 + ((1000000 * 5) % 1048576)
-    Should Be Equal As Integers    ${count_files}    11
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
-    Verify Healthy EC Replica    ${directory}    2    ${sum_size_last_stripe}
+    Should Be Equal As Integers    ${count_files}    1
 
 6 data blocks
     Create EC key    1048576    6
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 5
-    Should Be Equal As Integers    ${count_files}    11
-    FOR    ${block}    IN RANGE    1    3
-        Verify Healthy EC Replica    ${directory}    ${block}    ${sum_size}
-    END
+    Should Be Equal As Integers    ${count_files}    1
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot
index 52d48c25f77d..7815e8ef4f75 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests-ec6-3.robot
@@ -47,66 +47,48 @@ Create EC key
     Create EC key    1048576    1
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    10
-    ${sum_size} =    Evaluate    1048576 * 4
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 2 data blocks
     Create EC key    1048576    2
     ${directory} =    Execute replicas verify checksums CLI tool
-    ${sum_size} =    Evaluate    1048576 * 5
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    10
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 3 data blocks
     Create EC key    1048576    3
     ${directory} =    Execute replicas verify checksums CLI tool
-    ${sum_size} =    Evaluate    1048576 * 6
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    10
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
-
+    Should Be Equal As Integers    ${count_files}    1
+
 4 data blocks
     Create EC key    1048576    4
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 7
-    Should Be Equal As Integers    ${count_files}    10
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 5 data blocks
     Create EC key    1048576    5
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 8
-    Should Be Equal As Integers    ${count_files}    10
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 6 data blocks
     Create EC key    1048576    6
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 9
-    Should Be Equal As Integers    ${count_files}    10
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
+    Should Be Equal As Integers    ${count_files}    1
 
 6 data blocks and partial stripe
     Create EC key    1000000    7
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 9
     ${sum_size_last_stripe} =    Evaluate    ((1000000 * 7) % 1048576) * 4
-    Should Be Equal As Integers    ${count_files}    19
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
-    Verify Healthy EC Replica    ${directory}    2    ${sum_size_last_stripe}
+    Should Be Equal As Integers    ${count_files}    1
 
 7 data blocks and partial stripe
     Create EC key    1000000    8
     ${directory} =    Execute replicas verify checksums CLI tool
     ${count_files} =    Count Files In Directory    ${directory}
-    ${sum_size} =    Evaluate    1048576 * 9
     ${sum_size_last_stripe} =    Evaluate    1048576 * 4 + ((1000000 * 8) % 1048576)
-    Should Be Equal As Integers    ${count_files}    19
-    Verify Healthy EC Replica    ${directory}    1    ${sum_size}
-    Verify Healthy EC Replica    ${directory}    2    ${sum_size_last_stripe}
+    Should Be Equal As Integers    ${count_files}    1
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot
index 803ab19ade84..02de6794b237 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-tests.robot
@@ -41,14 +41,7 @@ Test ozone debug read-replicas
     Set Test Variable    ${DIR}    ${directory}
 
     ${count_files} =    Count Files In Directory    ${directory}
-    Should Be Equal As Integers    ${count_files}    7
-
-    ${json} =    Read Replicas Manifest
-    ${md5sum} =    Execute    md5sum ${TEMP_DIR}/${TESTFILE} | awk '{print $1}'
-
-    FOR    ${replica}    IN RANGE    3
-        Verify Healthy Replica    ${json}    ${replica}    ${md5sum}
-    END
+    Should Be Equal As Integers    ${count_files}    1
 
 Test ozone debug version
diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot
index 9bb77d00d6d3..b042f606cf2c 100644
--- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot
@@ -87,15 +87,9 @@ Verify Stale Replica
     [arguments]    ${json}    ${replica}
     FOR    ${block}    IN RANGE    2
-        ${n} =    Evaluate    ${block} + 1
         ${datanode} =    Set Variable    ${json}[blocks][${block}][replicas][${replica}][hostname]
-        ${filename} =    Set Variable    ${DIR}/${TESTFILE}_block${n}_${datanode}
         IF    '${datanode}' == '${STALE_DATANODE}'
-            File Should Be Empty    ${filename}
             Should Contain    ${json}[blocks][${block}][replicas][${replica}][exception]    UNAVAILABLE
-        ELSE
-            ${filesize} =    Get File Size    ${filename}
-            Should Be Equal As Integers    ${json}[blocks][${block}][length]    ${filesize}
         END
     END
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java
index de6aa05de73c..52eec96a21fe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java
@@ -17,33 +17,27 @@
 
 package org.apache.hadoop.ozone.debug.replicas;
 
-import static java.util.Collections.emptyMap;
-
 import com.fasterxml.jackson.databind.node.ArrayNode;
 import com.fasterxml.jackson.databind.node.ObjectNode;
 import jakarta.annotation.Nonnull;
-import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.Map;
-import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.io.output.NullOutputStream;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.server.JsonUtils;
-import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.slf4j.Logger;
@@ -82,11 +76,8 @@ public Checksums(OzoneClient client, String outputDir, Logger log, OzoneConfigur
   }
 
   private void downloadReplicasAndCreateManifest(
-      String keyName, Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
-      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
-          replicasWithoutChecksum,
-      File dir, ArrayNode blocks) throws IOException {
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
+      ArrayNode blocks) throws IOException {
     int blockIndex = 0;
     for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
@@ -97,16 +88,11 @@ private void downloadReplicasAndCreateManifest(
       blockIndex += 1;
       OmKeyLocationInfo locationInfo = block.getKey();
       blockJson.put(JSON_PROPERTY_BLOCK_INDEX, blockIndex);
-      blockJson.put(JSON_PROPERTY_BLOCK_CONTAINERID,
-          locationInfo.getContainerID());
+      blockJson.put(JSON_PROPERTY_BLOCK_CONTAINERID, locationInfo.getContainerID());
       blockJson.put(JSON_PROPERTY_BLOCK_LOCALID, locationInfo.getLocalID());
       blockJson.put(JSON_PROPERTY_BLOCK_LENGTH, locationInfo.getLength());
       blockJson.put(JSON_PROPERTY_BLOCK_OFFSET, locationInfo.getOffset());
-      BlockID blockID = locationInfo.getBlockID();
-      Map<DatanodeDetails, OzoneInputStream> blockReplicasWithoutChecksum =
-          replicasOf(blockID, replicasWithoutChecksum);
-
       for (Map.Entry<DatanodeDetails, OzoneInputStream> replica
           : block.getValue().entrySet()) {
         DatanodeDetails datanode = replica.getKey();
@@ -116,47 +102,16 @@ private void downloadReplicasAndCreateManifest(
         replicaJson.put(JSON_PROPERTY_REPLICA_HOSTNAME, datanode.getHostName());
         replicaJson.put(JSON_PROPERTY_REPLICA_UUID, datanode.getUuidString());
 
-        String fileName = keyName + "_block" + blockIndex + "_" +
-            datanode.getHostName();
-        Path path = new File(dir, fileName).toPath();
-
         try (InputStream is = replica.getValue()) {
-          Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING);
+          IOUtils.copyLarge(is, NullOutputStream.INSTANCE);
         } catch (IOException e) {
-          Throwable cause = e.getCause();
           replicaJson.put(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage());
-          if (cause instanceof OzoneChecksumException) {
-            try (InputStream is = getReplica(
-                blockReplicasWithoutChecksum, datanode)) {
-              Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING);
-            }
-          }
         }
         replicasJson.add(replicaJson);
       }
       blockJson.set(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson);
       blocks.add(blockJson);
-
-      IOUtils.close(log, blockReplicasWithoutChecksum.values());
-    }
-  }
-
-  private Map<DatanodeDetails, OzoneInputStream> replicasOf(BlockID blockID,
-      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas) {
-    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
-        block : replicas.entrySet()) {
-      if (block.getKey().getBlockID().equals(blockID)) {
-        return block.getValue();
-      }
     }
-    return emptyMap();
-  }
-
-  private InputStream getReplica(
-      Map<DatanodeDetails, OzoneInputStream> replicas, DatanodeDetails datanode
-  ) {
-    InputStream input = replicas.remove(datanode);
-    return input != null ? input : new ByteArrayInputStream(new byte[0]);
   }
 
   @Nonnull
@@ -185,21 +140,8 @@ public void verifyKey(OzoneKeyDetails keyDetails) {
     String bucketName = keyDetails.getBucketName();
     String keyName = keyDetails.getName();
     System.out.println("Processing key : " + volumeName + "/" + bucketName + "/" + keyName);
-    boolean isChecksumVerifyEnabled = ozoneConfiguration.getBoolean("ozone.client.verify.checksum", true);
-    RpcClient newClient = null;
     try {
-      OzoneConfiguration configuration = new OzoneConfiguration(ozoneConfiguration);
-      configuration.setBoolean("ozone.client.verify.checksum", !isChecksumVerifyEnabled);
-      newClient = getClient(isChecksumVerifyEnabled);
-      ClientProtocol noChecksumClient;
-      ClientProtocol checksumClient;
-      if (isChecksumVerifyEnabled) {
-        checksumClient = client.getObjectStore().getClientProxy();
-        noChecksumClient = newClient;
-      } else {
-        checksumClient = newClient;
-        noChecksumClient = client.getObjectStore().getClientProxy();
-      }
+      ClientProtocol checksumClient = client.getObjectStore().getClientProxy();
 
       // Multilevel keys will have a '/' in their names. This interferes with
       // directory and file creation process. Flatten the keys to fix this.
@@ -209,15 +151,13 @@ public void verifyKey(OzoneKeyDetails keyDetails) {
       OzoneKeyDetails keyInfoDetails = checksumClient.getKeyDetails(volumeName, bucketName, keyName);
       Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas =
           checksumClient.getKeysEveryReplicas(volumeName, bucketName, keyName);
-      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicasWithoutChecksum =
-          noChecksumClient.getKeysEveryReplicas(volumeName, bucketName, keyName);
 
       ObjectNode result = JsonUtils.createObjectNode(null);
       result.put(JSON_PROPERTY_FILE_NAME, volumeName + "/" + bucketName + "/" + keyName);
       result.put(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize());
 
       ArrayNode blocks = JsonUtils.createArrayNode();
-      downloadReplicasAndCreateManifest(sanitizedKeyName, replicas, replicasWithoutChecksum, dir, blocks);
+      downloadReplicasAndCreateManifest(replicas, blocks);
       result.set(JSON_PROPERTY_FILE_BLOCKS, blocks);
 
       String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result);
@@ -229,15 +169,4 @@ public void verifyKey(OzoneKeyDetails keyDetails) {
       throw new RuntimeException(e);
     }
   }
-
-  private RpcClient getClient(boolean isChecksumVerifyEnabled) throws IOException {
-    if (rpcClient != null) {
-      return rpcClient;
-    }
-
-    OzoneConfiguration configuration = new OzoneConfiguration(ozoneConfiguration);
-    configuration.setBoolean("ozone.client.verify.checksum", !isChecksumVerifyEnabled);
-    rpcClient = new RpcClient(configuration, null);
-    return rpcClient;
-  }
 }
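The core change in PATCH 2/4: instead of copying every replica to a file under the output directory (and re-reading corrupt replicas with a second, checksum-disabled client), the verifier now drains each replica stream into a null sink, so checksum validation happens inside the client read path and only the JSON manifest lands on disk. That is why the robot tests above now expect a single output file. A minimal, self-contained sketch of that drain pattern, where the ByteArrayInputStream merely stands in for the checksum-verifying OzoneInputStream:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.NullOutputStream;

public class DrainToNullSketch {
  public static void main(String[] args) throws IOException {
    try (InputStream is = new ByteArrayInputStream(new byte[8192])) {
      // Reading to EOF forces every chunk through the stream; with a real
      // OzoneInputStream, a checksum mismatch surfaces here as an IOException.
      long bytes = IOUtils.copyLarge(is, NullOutputStream.INSTANCE);
      System.out.println("verified " + bytes + " bytes");
    }
  }
}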
From 3271d9f103ba633d221916378caf8b5160b2578e Mon Sep 17 00:00:00 2001
From: Rishabh Patel <1607531+ptlrs@users.noreply.github.com>
Date: Mon, 24 Mar 2025 22:56:31 -0700
Subject: [PATCH 3/4] Delete hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java

---
 .../hadoop/ozone/debug/TestReplicasCli.java   | 367 ------------------
 1 file changed, 367 deletions(-)
 delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java
deleted file mode 100644
index 7f264dca3245..000000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestReplicasCli.java
+++ /dev/null

From 033b08df8d1f82ea145f9d036e0fae5cf5df1193 Mon Sep 17 00:00:00 2001
From: Rishabh Patel
Date: Mon, 24 Mar 2025 23:17:28 -0700
Subject: [PATCH 4/4] HDDS-12661. fix findbugs error
---
 .../apache/hadoop/ozone/debug/replicas/Checksums.java | 10 +---------
 .../hadoop/ozone/debug/replicas/ReplicasVerify.java   |  2 +-
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java
index 52eec96a21fe..38f6f0103017 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/Checksums.java
@@ -30,16 +30,13 @@
 import java.util.Map;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.output.NullOutputStream;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.server.JsonUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.slf4j.Logger;
 
 /**
  * Class that downloads every replica for all the blocks associated with a
@@ -63,16 +60,11 @@ public class Checksums implements ReplicaVerifier {
 
   private String outputDir;
-  private RpcClient rpcClient = null;
   private OzoneClient client;
-  private Logger log;
-  private OzoneConfiguration ozoneConfiguration;
 
-  public Checksums(OzoneClient client, String outputDir, Logger log, OzoneConfiguration conf) {
+  public Checksums(OzoneClient client, String outputDir) {
     this.client = client;
     this.outputDir = outputDir;
-    this.log = log;
-    this.ozoneConfiguration = conf;
   }
 
   private void downloadReplicasAndCreateManifest(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java
index a5d474219e89..dcdaebc0d6fe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasVerify.java
@@ -77,7 +77,7 @@ protected void execute(OzoneClient client, OzoneAddress address) throws IOExcept
     replicaVerifiers = new ArrayList<>();
 
     if (verification.doExecuteChecksums) {
-      replicaVerifiers.add(new Checksums(client, outputDir, LOG, getConf()));
+      replicaVerifiers.add(new Checksums(client, outputDir));
     }
 
     if (verification.doExecutePadding) {
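After PATCH 4/4, a verifier needs only the client and an output directory, since the unused Logger, OzoneConfiguration, and RpcClient fields flagged by findbugs are gone. A hedged sketch of how a further verifier could plug into ReplicasVerify, assuming ReplicaVerifier exposes the same verifyKey(OzoneKeyDetails) hook that Checksums implements (the class below is illustrative, not part of these patches):

package org.apache.hadoop.ozone.debug.replicas;

import org.apache.hadoop.ozone.client.OzoneKeyDetails;

public class KeySizeReporter implements ReplicaVerifier {
  @Override
  public void verifyKey(OzoneKeyDetails keyDetails) {
    // No-op check that only reports key sizes; a template for new checks
    // registered alongside Checksums in ReplicasVerify#execute.
    System.out.println(keyDetails.getName() + " : " + keyDetails.getDataSize() + " bytes");
  }
}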