From b747ee582bdb3a1ddc928d249084ac8d1fe92a33 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Fri, 7 Mar 2025 19:31:11 +0100 Subject: [PATCH 01/29] HDDS-12493. Move container upgrade under repair --- hadoop-hdds/tools/pom.xml | 6 -- .../scm/cli/container/ContainerCommands.java | 1 - .../main/smoketest/admincli/container.robot | 1 - .../shell/TestOzoneContainerUpgradeShell.java | 78 ++++++----------- hadoop-ozone/tools/pom.xml | 12 +++ .../ozone/repair/datanode/DatanodeRepair.java | 36 ++++++++ .../ozone/repair/datanode/package-info.java | 21 +++++ .../schemaupgrade}/UpgradeChecker.java | 2 +- .../UpgradeContainerSchemaSubcommand.java | 85 +++++-------------- .../schemaupgrade}/UpgradeManager.java | 2 +- .../datanode/schemaupgrade}/UpgradeTask.java | 2 +- .../datanode/schemaupgrade}/UpgradeUtils.java | 2 +- .../datanode/schemaupgrade}/package-info.java | 2 +- .../schemaupgrade}/TestUpgradeManager.java | 2 +- 14 files changed, 123 insertions(+), 129 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java rename {hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade}/UpgradeChecker.java (98%) rename hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java (67%) rename {hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade}/UpgradeManager.java (98%) rename {hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade}/UpgradeTask.java (99%) rename {hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade}/UpgradeUtils.java (98%) rename {hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade}/package-info.java (93%) rename {hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade => hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade}/TestUpgradeManager.java (99%) diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 07ffa537bb04..769f8411f465 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -134,12 +134,6 @@ test-jar test - - org.apache.ozone - hdds-container-service - test-jar - test - org.apache.ozone hdds-test-utils diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 2a2eec90ffef..42304896febf 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -36,7 +36,6 @@ CreateSubcommand.class, CloseSubcommand.class, ReportSubcommand.class, - UpgradeSubcommand.class }) 
@MetaInfServices(AdminSubcommand.class) public class ContainerCommands implements AdminSubcommand { diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index 564fd1f5d699..0a37acb86b0a 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -111,7 +111,6 @@ Incomplete command Should contain ${output} create Should contain ${output} close Should contain ${output} report - Should contain ${output} upgrade #List containers on unknown host # ${output} = Execute And Ignore Error ozone admin --verbose container list --scm unknown-host diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index 75ddd145d96d..ec1c7e49eea5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -22,18 +22,18 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; +import static org.apache.ozone.test.GenericTestUtils.captureErr; +import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.common.base.Preconditions; import java.io.File; import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.ArrayList; @@ -41,12 +41,9 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands; -import org.apache.hadoop.hdds.scm.cli.container.UpgradeSubcommand; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; @@ -59,13 +56,15 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneTestUtils; import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import 
org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -80,21 +79,12 @@ public class TestOzoneContainerUpgradeShell { LoggerFactory.getLogger(TestOzoneContainerUpgradeShell.class); private static MiniOzoneCluster cluster = null; private static OzoneClient client; - private static OzoneConfiguration conf = null; private static final String VOLUME_NAME = UUID.randomUUID().toString(); private static final String BUCKET_NAME = UUID.randomUUID().toString(); - protected static void startCluster() throws Exception { - cluster = MiniOzoneCluster.newBuilder(conf) - .build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - } - @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(OZONE_ADMINISTRATORS, "*"); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); @@ -110,7 +100,10 @@ public static void init() throws Exception { // gen schema v2 container conf.setBoolean(CONTAINER_SCHEMA_V3_ENABLED, false); - startCluster(); + cluster = MiniOzoneCluster.newBuilder(conf) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); } public List getDatanodeConfigs() { @@ -141,32 +134,23 @@ public void testNormalContainerUpgrade() throws Exception { shutdownCluster(); // datanode1 test check all pass & upgrade success - UpgradeSubcommand.setOzoneConfiguration(datanodeConf); - StringWriter stdout = new StringWriter(); - PrintWriter pstdout = new PrintWriter(stdout); - CommandLine commandLine = upgradeCommand(pstdout); - - String[] args = new String[]{"upgrade", "--yes"}; - int exitCode = commandLine.execute(args); + int exitCode = runUpgrade(datanodeConf); assertEquals(0, exitCode); - // datanode2 NodeOperationalState is IN_SERVICE upgrade fail. - OzoneConfiguration datanode2Conf = datanodeConfigs.get(1); - UpgradeSubcommand.setOzoneConfiguration(datanode2Conf); - StringWriter stdout2 = new StringWriter(); - PrintWriter pstdout2 = new PrintWriter(stdout2); - CommandLine commandLine2 = upgradeCommand(pstdout2); - - String[] args2 = new String[]{"upgrade", "--yes"}; - int exit2Code = commandLine2.execute(args2); + GenericTestUtils.PrintStreamCapturer err = captureErr(); + // datanode2 NodeOperationalState is IN_SERVICE upgrade fail. 
+ int exit2Code = runUpgrade(datanodeConfigs.get(1)); assertEquals(0, exit2Code); - String cmdOut = stdout2.toString(); - assertThat(cmdOut).contains("IN_MAINTENANCE"); + assertThat(err.get()).contains("IN_MAINTENANCE"); } - private CommandLine upgradeCommand(PrintWriter pstdout) { - return new CommandLine(new ContainerCommands()).setOut(pstdout); + private static int runUpgrade(OzoneConfiguration conf) { + CommandLine cmd = new OzoneRepair().getCmd(); + return withTextFromSystemIn("y") + .execute(() -> cmd.execute( + "-D", OZONE_METADATA_DIRS + "=" + conf.get(OZONE_METADATA_DIRS), + "datanode", "upgrade-container-schema", "--yes")); } private static ContainerInfo writeKeyAndCloseContainer() throws Exception { @@ -176,12 +160,8 @@ private static ContainerInfo writeKeyAndCloseContainer() throws Exception { } private static void writeKey(String keyName) throws IOException { - try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) { - TestDataUtil.createVolumeAndBucket(client, VOLUME_NAME, BUCKET_NAME); - TestDataUtil.createKey( - client.getObjectStore().getVolume(VOLUME_NAME).getBucket(BUCKET_NAME), - keyName, "test".getBytes(StandardCharsets.UTF_8)); - } + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, VOLUME_NAME, BUCKET_NAME); + TestDataUtil.createKey(bucket, keyName, "test".getBytes(StandardCharsets.UTF_8)); } private static ContainerInfo closeContainerForKey(String keyName) @@ -207,18 +187,10 @@ public static void shutdownCluster() throws InterruptedException { try { IOUtils.closeQuietly(client); if (cluster != null) { - List dnConfigs = cluster.getHddsDatanodes().stream() - .map(HddsDatanodeService::getConf).collect(Collectors.toList()); - DatanodeStoreCache.setMiniClusterMode(false); cluster.stop(); - ContainerCache.getInstance(conf).shutdownCache(); - - - for (OzoneConfiguration dnConfig : dnConfigs) { - ContainerCache.getInstance(dnConfig).shutdownCache(); - } + ContainerCache.getInstance(cluster.getConf()).shutdownCache(); DefaultMetricsSystem.shutdown(); ManagedRocksObjectMetrics.INSTANCE.assertNoLeaks(); CodecTestUtil.gc(); diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index a90ab529dd02..8838388ae028 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -254,6 +254,18 @@ + + org.apache.ozone + hdds-common + test-jar + test + + + org.apache.ozone + hdds-container-service + test-jar + test + org.apache.ozone hdds-test-utils diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java new file mode 100644 index 000000000000..57e0b50b7e2d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.datanode; + +import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeContainerSchemaSubcommand; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +/** + * Ozone Repair CLI for Datanode. + */ +@CommandLine.Command(name = "datanode", + subcommands = { + UpgradeContainerSchemaSubcommand.class, + }, + description = "Tools to repair Datanode") +@MetaInfServices(RepairSubcommand.class) +public class DatanodeRepair implements RepairSubcommand { + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java new file mode 100644 index 000000000000..8ccbb882dd35 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Repair tools for Datanode. + */ +package org.apache.hadoop.ozone.repair.datanode; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeChecker.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java similarity index 98% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeChecker.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java index 6cdd230c987e..92bdcb04e5fb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeChecker.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdds.scm.cli.container.upgrade; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java similarity index 67% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java index b350bc729a55..498a53540732 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java @@ -15,9 +15,8 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.scm.cli.container; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import java.io.File; import java.io.InputStreamReader; @@ -25,24 +24,16 @@ import java.util.Iterator; import java.util.List; import java.util.Scanner; -import java.util.concurrent.Callable; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.cli.container.upgrade.UpgradeChecker; -import org.apache.hadoop.hdds.scm.cli.container.upgrade.UpgradeManager; -import org.apache.hadoop.hdds.scm.cli.container.upgrade.UpgradeUtils; -import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.ozone.repair.RepairTool; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -50,15 +41,12 @@ * This is the handler that process container upgrade command. 
*/ @Command( - name = "upgrade", + name = "upgrade-container-schema", description = "Offline upgrade all schema V2 containers to schema V3 " + "for this datanode.", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class UpgradeSubcommand extends AbstractSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(UpgradeSubcommand.class); +public class UpgradeContainerSchemaSubcommand extends RepairTool { @CommandLine.Option(names = {"--volume"}, required = false, @@ -69,27 +57,16 @@ public class UpgradeSubcommand extends AbstractSubcommand implements Callable pair = upgradeChecker.checkDatanodeRunning(); - final boolean isRunning = pair.getKey(); - if (isRunning) { - out().println(pair.getValue()); - return null; - } DatanodeDetails dnDetail = UpgradeUtils.getDatanodeDetails(configuration); @@ -103,31 +80,28 @@ public Void call() throws Exception { if (metadataLayoutFeature.layoutVersion() < needLayoutVersion || softwareLayoutFeature.layoutVersion() < needLayoutVersion) { - out().println(String.format( + error( "Please upgrade your software version, no less than %s," + " current metadata layout version is %s," + " software layout version is %s", HDDSLayoutFeature.DATANODE_SCHEMA_V3.name(), - metadataLayoutFeature.name(), softwareLayoutFeature.name())); - return null; + metadataLayoutFeature.name(), softwareLayoutFeature.name()); + return; } if (!Strings.isNullOrEmpty(volume)) { File volumeDir = new File(volume); if (!volumeDir.exists() || !volumeDir.isDirectory()) { - out().println( - String.format("Volume path %s is not a directory or doesn't exist", - volume)); - return null; + error("Volume path %s is not a directory or doesn't exist", volume); + return; } File hddsRootDir = new File(volume + "/" + HddsVolume.HDDS_VOLUME_DIR); File versionFile = new File(volume + "/" + HddsVolume.HDDS_VOLUME_DIR + "/" + Storage.STORAGE_FILE_VERSION); if (!hddsRootDir.exists() || !hddsRootDir.isDirectory() || !versionFile.exists() || !versionFile.isFile()) { - out().println( - String.format("Volume path %s is not a valid data volume", volume)); - return null; + error("Volume path %s is not a valid data volume", volume); + return; } configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume); } @@ -136,10 +110,10 @@ public Void call() throws Exception { dnDetail.getPersistedOpState(); if (!opState.equals(HddsProtos.NodeOperationalState.IN_MAINTENANCE)) { - out().println("This command requires the datanode's " + + error("This command requires the datanode's " + "NodeOperationalState to be IN_MAINTENANCE, currently is " + opState); - return null; + return; } List allVolume = @@ -149,15 +123,15 @@ public Void call() throws Exception { while (volumeIterator.hasNext()) { HddsVolume hddsVolume = volumeIterator.next(); if (UpgradeChecker.isAlreadyUpgraded(hddsVolume)) { - out().println("Volume " + hddsVolume.getVolumeRootDir() + + info("Volume " + hddsVolume.getVolumeRootDir() + " is already upgraded, skip it."); volumeIterator.remove(); } } if (allVolume.isEmpty()) { - out().println("There is no more volume to upgrade. Exit."); - return null; + info("There is no more volume to upgrade. 
Exit."); + return; } if (!yes) { @@ -169,25 +143,12 @@ public Void call() throws Exception { boolean confirm = scanner.next().trim().equals("yes"); scanner.close(); if (!confirm) { - return null; + return; } } // do upgrade final UpgradeManager upgradeManager = new UpgradeManager(); upgradeManager.run(configuration, allVolume); - return null; - } - - @VisibleForTesting - public static void setOzoneConfiguration(OzoneConfiguration config) { - ozoneConfiguration = config; - } - - private OzoneConfiguration getConfiguration() { - if (ozoneConfiguration == null) { - ozoneConfiguration = new OzoneConfiguration(); - } - return ozoneConfiguration; } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java similarity index 98% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java index 5d87fde7533e..8d7a0041bbdd 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.scm.cli.container.upgrade; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java similarity index 99% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java index 4ca6186336b5..4ccb63f6dd30 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.scm.cli.container.upgrade; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java similarity index 98% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java index 567fd6df48cc..14ca387486a8 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdds.scm.cli.container.upgrade; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/package-info.java similarity index 93% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/package-info.java index c11a284cb206..9c0d31902f64 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/package-info.java @@ -18,4 +18,4 @@ /** * Contains all of the container related scm commands. */ -package org.apache.hadoop.hdds.scm.cli.container.upgrade; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java similarity index 99% rename from hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java index 30f426823806..fd005f6306f0 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdds.scm.cli.container.upgrade; +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; From 992e8990b11a328c8ed0d066aba59ea1ba7e2602 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 06:55:28 +0100 Subject: [PATCH 02/29] remove confirmation --- .../shell/TestOzoneContainerUpgradeShell.java | 2 +- .../schemaupgrade/UpgradeChecker.java | 35 ------------------- .../UpgradeContainerSchemaSubcommand.java | 20 ----------- 3 files changed, 1 insertion(+), 56 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index ec1c7e49eea5..58e78bbbd58c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -150,7 +150,7 @@ private static int runUpgrade(OzoneConfiguration conf) { return withTextFromSystemIn("y") .execute(() -> cmd.execute( "-D", OZONE_METADATA_DIRS + "=" + conf.get(OZONE_METADATA_DIRS), - "datanode", "upgrade-container-schema", "--yes")); + "datanode", "upgrade-container-schema")); } private static ContainerInfo writeKeyAndCloseContainer() throws Exception { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java index 92bdcb04e5fb..79d70d7c0507 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java @@ -20,7 +20,6 @@ import java.io.File; import java.io.IOException; import java.util.List; -import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -37,40 +36,6 @@ */ public class UpgradeChecker { - /* - * Verify that the datanode is in the shutdown state or running. - */ - public Pair checkDatanodeRunning() { - String command = - "ps aux | grep org.apache.hadoop.ozone.HddsDatanodeService " + - "| grep -v grep"; - try { - Process exec = Runtime.getRuntime().exec(new String[]{"/bin/bash", "-c", - command}); - boolean notTimeout = exec.waitFor(10, TimeUnit.SECONDS); - if (!notTimeout) { - return Pair.of(true, - String.format("Execution of the command '%s' timeout", command)); - } - if (exec.exitValue() == 0) { - return Pair.of(true, "HddsDatanodeService is running." + - " This upgrade command requires datanode to be off and in" + - " the IN_MAINTENANCE mode. 
Please put the datanode in" + - " the desired state first, then try this command later again."); - } else if (exec.exitValue() == 1) { - return Pair.of(false, "HddsDatanodeService is not running."); - } else { - return Pair.of(true, - String.format("Return code of the command '%s' is %d", command, - exec.exitValue())); - } - } catch (IOException | InterruptedException e) { - return Pair.of(true, - String.format("Run command '%s' has error '%s'", - command, e.getMessage())); - } - } - public Pair getLayoutFeature( DatanodeDetails dnDetail, OzoneConfiguration conf) throws IOException { DatanodeLayoutStorage layoutStorage = diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java index 498a53540732..5f6e45846c46 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java @@ -19,11 +19,8 @@ import com.google.common.base.Strings; import java.io.File; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.List; -import java.util.Scanner; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -53,10 +50,6 @@ public class UpgradeContainerSchemaSubcommand extends RepairTool { description = "volume path") private String volume; - @CommandLine.Option(names = {"-y", "--yes"}, - description = "Continue without interactive user confirmation") - private boolean yes; - @Override protected Component serviceToBeOffline() { return Component.DATANODE; @@ -134,19 +127,6 @@ public void execute() throws Exception { return; } - if (!yes) { - Scanner scanner = new Scanner(new InputStreamReader( - System.in, StandardCharsets.UTF_8)); - System.out.println( - "All volume db stores will be automatically backup," + - " should we continue the upgrade ? 
[yes|no] : "); - boolean confirm = scanner.next().trim().equals("yes"); - scanner.close(); - if (!confirm) { - return; - } - } - // do upgrade final UpgradeManager upgradeManager = new UpgradeManager(); upgradeManager.run(configuration, allVolume); From e65ebf3a8d9fdf112a227a91dce69abad4e4cbe1 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 07:20:19 +0100 Subject: [PATCH 03/29] get rid of shared volumeStoreMap --- .../UpgradeContainerSchemaSubcommand.java | 3 +-- .../schemaupgrade/UpgradeManager.java | 23 +++++++++---------- .../datanode/schemaupgrade/UpgradeTask.java | 9 ++------ .../schemaupgrade/TestUpgradeManager.java | 20 ++++++---------- 4 files changed, 21 insertions(+), 34 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java index 5f6e45846c46..c3f0f97b4842 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java @@ -128,7 +128,6 @@ public void execute() throws Exception { } // do upgrade - final UpgradeManager upgradeManager = new UpgradeManager(); - upgradeManager.run(configuration, allVolume); + UpgradeManager.run(configuration, allVolume); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java index 8d7a0041bbdd..e61cb0acd539 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java @@ -17,14 +17,12 @@ package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; -import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; @@ -41,10 +39,7 @@ public class UpgradeManager { public static final Logger LOG = LoggerFactory.getLogger(UpgradeManager.class); - private final Map - volumeStoreMap = new ConcurrentHashMap<>(); - - public List run(OzoneConfiguration configuration, + public static List run(OzoneConfiguration configuration, List volumes) throws IOException { List results = new ArrayList<>(); Map> volumeFutures = new HashMap<>(); @@ -54,7 +49,7 @@ public List run(OzoneConfiguration configuration, for (StorageVolume volume : volumes) { final HddsVolume hddsVolume = (HddsVolume) volume; final UpgradeTask task = - new UpgradeTask(configuration, hddsVolume, volumeStoreMap); + new UpgradeTask(configuration, hddsVolume); final CompletableFuture future = task.getUpgradeFuture(); volumeFutures.put(hddsVolume, future); } @@ -80,11 +75,6 @@ public List run(OzoneConfiguration configuration, return results; } - @VisibleForTesting - public DatanodeStoreSchemaThreeImpl getDBStore(HddsVolume volume) { - return 
volumeStoreMap.get(volume.getStorageDir().getAbsolutePath()); - } - /** * This class contains v2 to v3 container upgrade result. */ @@ -95,6 +85,7 @@ public static class Result { private long endTimeMs = 0L; private Exception e = null; private Status status = Status.FAIL; + private DatanodeStoreSchemaThreeImpl store; public Result(HddsVolume hddsVolume) { this.hddsVolume = hddsVolume; @@ -108,6 +99,14 @@ public long getCost() { return endTimeMs - startTimeMs; } + DatanodeStoreSchemaThreeImpl getDBStore() { + return store; + } + + void setDBStore(DatanodeStoreSchemaThreeImpl store) { + this.store = store; + } + public void setResultList( List resultList) { resultMap = new HashMap<>(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java index 4ccb63f6dd30..3ddf9a6f77ee 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; import org.apache.commons.io.FileUtils; @@ -69,7 +68,6 @@ public class UpgradeTask { private final ConfigurationSource config; private final HddsVolume hddsVolume; private DatanodeStoreSchemaThreeImpl dataStore; - private final Map volumeStoreMap; private static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup"; public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete"; @@ -79,11 +77,9 @@ public class UpgradeTask { (new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration())) .getMap().keySet(); - public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume, - Map storeMap) { + public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) { this.config = config; this.hddsVolume = hddsVolume; - this.volumeStoreMap = storeMap; } public CompletableFuture getUpgradeFuture() { @@ -159,8 +155,7 @@ public CompletableFuture getUpgradeFuture() { RawDB db = DatanodeStoreCache.getInstance().getDB( volumeDBPath.getAbsolutePath(), config); dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore(); - volumeStoreMap.put( - hddsVolume.getStorageDir().getAbsolutePath(), dataStore); + result.setDBStore(dataStore); } catch (IOException e) { result.fail(new Exception( "Failed to load db for volume " + hddsVolume.getVolumeRootDir() + diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java index fd005f6306f0..8ef2699e2bfb 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java @@ -158,12 +158,11 @@ public void testUpgrade() throws IOException { shutdownAllVolume(); - final UpgradeManager upgradeManager = new UpgradeManager(); final List results = - upgradeManager.run(CONF, + UpgradeManager.run(CONF, StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); - checkV3MetaData(keyValueContainerBlockDataMap, results, upgradeManager); + 
checkV3MetaData(keyValueContainerBlockDataMap, results); } private Map putAnyBlockData(KeyValueContainerData data, @@ -267,26 +266,21 @@ public void shutdownAllVolume() { } private void checkV3MetaData(Map> blockDataMap, List results, - UpgradeManager upgradeManager) throws IOException { - Map resultMap = new HashMap<>(); + Map> blockDataMap, List results) throws IOException { + Map volumeResults = new HashMap<>(); for (UpgradeManager.Result result : results) { - resultMap.putAll(result.getResultMap()); + result.getResultMap().forEach((k, v) -> volumeResults.put(k, result)); } for (Map.Entry> entry : blockDataMap.entrySet()) { final KeyValueContainerData containerData = entry.getKey(); final Map blockKeyValue = entry.getValue(); - - final UpgradeTask.UpgradeContainerResult result = - resultMap.get(containerData.getContainerID()); - final KeyValueContainerData v3ContainerData = - (KeyValueContainerData) result.getNewContainerData(); + Long containerID = containerData.getContainerID(); final DatanodeStoreSchemaThreeImpl datanodeStoreSchemaThree = - upgradeManager.getDBStore(v3ContainerData.getVolume()); + volumeResults.get(containerID).getDBStore(); final Table blockDataTable = datanodeStoreSchemaThree.getBlockDataTable(); From b9de4f77dbcdc088008ce3e19294a1d94c88f9f3 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 07:21:21 +0100 Subject: [PATCH 04/29] avoid unnecessary cast --- .../ozone/repair/datanode/schemaupgrade/UpgradeManager.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java index e61cb0acd539..8619dc9275d4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java @@ -26,7 +26,6 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,8 +45,7 @@ public static List run(OzoneConfiguration configuration, long startTime = System.currentTimeMillis(); LOG.info("Start to upgrade {} volume(s)", volumes.size()); - for (StorageVolume volume : volumes) { - final HddsVolume hddsVolume = (HddsVolume) volume; + for (HddsVolume hddsVolume : volumes) { final UpgradeTask task = new UpgradeTask(configuration, hddsVolume); final CompletableFuture future = task.getUpgradeFuture(); From 08a915a908f74a12b997a2218c2a4c90cc51f15f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 07:22:28 +0100 Subject: [PATCH 05/29] remove unused variable and method --- .../ozone/repair/datanode/schemaupgrade/UpgradeTask.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java index 3ddf9a6f77ee..9b23aa1fcd8f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java @@ -183,7 +183,6 @@ public CompletableFuture getUpgradeFuture() { result.success(); return result; }).whenComplete((r, e) -> { - final File hddsRootDir = r.getHddsVolume().getHddsRootDir(); final File file = UpgradeUtils.getVolumeUpgradeCompleteFile(r.getHddsVolume()); // create a flag file @@ -414,10 +413,6 @@ public ContainerData getOriginContainerData() { return originContainerData; } - public ContainerData getNewContainerData() { - return newContainerData; - } - public void setBackupContainerFilePath(String backupContainerFilePath) { this.backupContainerFilePath = backupContainerFilePath; } From f09fa69f5b68171be192ded88b635b8b183f49a3 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 08:47:25 +0100 Subject: [PATCH 06/29] UpgradeChecker methods can be static --- .../ozone/repair/datanode/schemaupgrade/UpgradeChecker.java | 4 ++-- .../schemaupgrade/UpgradeContainerSchemaSubcommand.java | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java index 79d70d7c0507..6346a2a88e9a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java @@ -36,7 +36,7 @@ */ public class UpgradeChecker { - public Pair getLayoutFeature( + public static Pair getLayoutFeature( DatanodeDetails dnDetail, OzoneConfiguration conf) throws IOException { DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf, dnDetail.getUuidString()); @@ -58,7 +58,7 @@ public Pair getLayoutFeature( return Pair.of(softwareLayoutFeature, metadataLayoutFeature); } - public List getAllVolume(DatanodeDetails detail, + public static List getAllVolume(DatanodeDetails detail, OzoneConfiguration configuration) throws IOException { final MutableVolumeSet dataVolumeSet = UpgradeUtils .getHddsVolumes(configuration, StorageVolume.VolumeType.DATA_VOLUME, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java index c3f0f97b4842..4c654ba495df 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java @@ -59,13 +59,11 @@ protected Component serviceToBeOffline() { public void execute() throws Exception { OzoneConfiguration configuration = getOzoneConf(); - final UpgradeChecker upgradeChecker = new UpgradeChecker(); - DatanodeDetails dnDetail = UpgradeUtils.getDatanodeDetails(configuration); Pair layoutFeature = - upgradeChecker.getLayoutFeature(dnDetail, configuration); + UpgradeChecker.getLayoutFeature(dnDetail, configuration); final HDDSLayoutFeature softwareLayoutFeature = layoutFeature.getLeft(); final HDDSLayoutFeature metadataLayoutFeature = layoutFeature.getRight(); final int needLayoutVersion = @@ -110,7 +108,7 @@ public void execute() throws Exception { } List allVolume = - 
upgradeChecker.getAllVolume(dnDetail, configuration); + UpgradeChecker.getAllVolume(dnDetail, configuration); Iterator volumeIterator = allVolume.iterator(); while (volumeIterator.hasNext()) { From c3462dfaa08da73fddd658adf4af1d80cd768a8f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 08:49:21 +0100 Subject: [PATCH 07/29] move volume and container upgrade result classes to top-level --- .../schemaupgrade/ContainerUpgradeResult.java | 113 +++++++++++++++++ .../schemaupgrade/UpgradeManager.java | 109 ++-------------- .../datanode/schemaupgrade/UpgradeTask.java | 113 ++--------------- .../schemaupgrade/VolumeUpgradeResult.java | 118 ++++++++++++++++++ .../schemaupgrade/TestUpgradeManager.java | 8 +- 5 files changed, 253 insertions(+), 208 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java new file mode 100644 index 000000000000..32b13dd60a87 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; + +import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; + +/** + * This class represents upgrade v2 to v3 container result. 
+ */ +public class ContainerUpgradeResult { + private final ContainerData originContainerData; + private ContainerData newContainerData; + private long totalRow = 0L; + private final long startTimeMs = System.currentTimeMillis(); + private long endTimeMs = 0L; + private Status status; + + private String backupContainerFilePath; + private String newContainerFilePath; + + public ContainerUpgradeResult( + ContainerData originContainerData) { + this.originContainerData = originContainerData; + this.status = Status.FAIL; + } + + public long getTotalRow() { + return totalRow; + } + + public Status getStatus() { + return status; + } + + public void setNewContainerData( + ContainerData newContainerData) { + this.newContainerData = newContainerData; + } + + public long getCostMs() { + return endTimeMs - startTimeMs; + } + + public ContainerData getOriginContainerData() { + return originContainerData; + } + + public void setBackupContainerFilePath(String backupContainerFilePath) { + this.backupContainerFilePath = backupContainerFilePath; + } + + public void setNewContainerFilePath(String newContainerFilePath) { + this.newContainerFilePath = newContainerFilePath; + } + + public void success(long rowCount) { + this.totalRow = rowCount; + this.endTimeMs = System.currentTimeMillis(); + this.status = Status.SUCCESS; + } + + @Override + public String toString() { + final StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("Result:{"); + stringBuilder.append("containerID="); + stringBuilder.append(originContainerData.getContainerID()); + stringBuilder.append(", originContainerSchemaVersion="); + stringBuilder.append( + ((KeyValueContainerData) originContainerData).getSchemaVersion()); + + if (newContainerData != null) { + stringBuilder.append(", schemaV2ContainerFileBackupPath="); + stringBuilder.append(backupContainerFilePath); + + stringBuilder.append(", newContainerSchemaVersion="); + stringBuilder.append( + ((KeyValueContainerData) newContainerData).getSchemaVersion()); + + stringBuilder.append(", schemaV3ContainerFilePath="); + stringBuilder.append(newContainerFilePath); + } + stringBuilder.append(", totalRow="); + stringBuilder.append(totalRow); + stringBuilder.append(", costMs="); + stringBuilder.append(getCostMs()); + stringBuilder.append(", status="); + stringBuilder.append(status); + stringBuilder.append("}"); + return stringBuilder.toString(); + } + + enum Status { + SUCCESS, + FAIL + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java index 8619dc9275d4..9e10b1729812 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java @@ -23,10 +23,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,27 +36,27 @@ public class UpgradeManager { public static final Logger LOG = LoggerFactory.getLogger(UpgradeManager.class); - public static List run(OzoneConfiguration configuration, + 
public static List run(OzoneConfiguration configuration, List volumes) throws IOException { - List results = new ArrayList<>(); - Map> volumeFutures = new HashMap<>(); + List results = new ArrayList<>(); + Map> volumeFutures = new HashMap<>(); long startTime = System.currentTimeMillis(); LOG.info("Start to upgrade {} volume(s)", volumes.size()); for (HddsVolume hddsVolume : volumes) { final UpgradeTask task = new UpgradeTask(configuration, hddsVolume); - final CompletableFuture future = task.getUpgradeFuture(); + final CompletableFuture future = task.getUpgradeFuture(); volumeFutures.put(hddsVolume, future); } - for (Map.Entry> entry : + for (Map.Entry> entry : volumeFutures.entrySet()) { final HddsVolume hddsVolume = entry.getKey(); - final CompletableFuture volumeFuture = entry.getValue(); + final CompletableFuture volumeFuture = entry.getValue(); try { - final Result result = volumeFuture.get(); + final VolumeUpgradeResult result = volumeFuture.get(); results.add(result); LOG.info("Finish upgrading containers on volume {}, {}", hddsVolume.getVolumeRootDir(), result.toString()); @@ -73,97 +71,4 @@ public static List run(OzoneConfiguration configuration, return results; } - /** - * This class contains v2 to v3 container upgrade result. - */ - public static class Result { - private Map resultMap; - private final HddsVolume hddsVolume; - private final long startTimeMs = System.currentTimeMillis(); - private long endTimeMs = 0L; - private Exception e = null; - private Status status = Status.FAIL; - private DatanodeStoreSchemaThreeImpl store; - - public Result(HddsVolume hddsVolume) { - this.hddsVolume = hddsVolume; - } - - public HddsVolume getHddsVolume() { - return hddsVolume; - } - - public long getCost() { - return endTimeMs - startTimeMs; - } - - DatanodeStoreSchemaThreeImpl getDBStore() { - return store; - } - - void setDBStore(DatanodeStoreSchemaThreeImpl store) { - this.store = store; - } - - public void setResultList( - List resultList) { - resultMap = new HashMap<>(); - resultList.forEach(res -> resultMap - .put(res.getOriginContainerData().getContainerID(), res)); - } - - public Map getResultMap() { - return resultMap; - } - - public boolean isSuccess() { - return this.status == Status.SUCCESS; - } - - public void success() { - this.endTimeMs = System.currentTimeMillis(); - this.status = Status.SUCCESS; - } - - public void fail(Exception exception) { - this.endTimeMs = System.currentTimeMillis(); - this.status = Status.FAIL; - this.e = exception; - } - - @Override - public String toString() { - final StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("Result:{"); - stringBuilder.append("hddsRootDir="); - stringBuilder.append(getHddsVolume().getHddsRootDir()); - stringBuilder.append(", resultList="); - AtomicLong total = new AtomicLong(0L); - if (resultMap != null) { - resultMap.forEach((k, r) -> { - stringBuilder.append(r.toString()); - stringBuilder.append("\n"); - total.addAndGet(r.getTotalRow()); - }); - } - stringBuilder.append(", totalRow="); - stringBuilder.append(total.get()); - stringBuilder.append(", costMs="); - stringBuilder.append(getCost()); - stringBuilder.append(", status="); - stringBuilder.append(status); - if (e != null) { - stringBuilder.append(", Exception="); - stringBuilder.append(e); - } - stringBuilder.append('}'); - return stringBuilder.toString(); - } - - enum Status { - SUCCESS, - FAIL - } - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java index 9b23aa1fcd8f..ef7d7a4ffd78 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java @@ -82,15 +82,15 @@ public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) { this.hddsVolume = hddsVolume; } - public CompletableFuture getUpgradeFuture() { + public CompletableFuture getUpgradeFuture() { final File lockFile = UpgradeUtils.getVolumeUpgradeLockFile(hddsVolume); return CompletableFuture.supplyAsync(() -> { - final UpgradeManager.Result result = - new UpgradeManager.Result(hddsVolume); + final VolumeUpgradeResult result = + new VolumeUpgradeResult(hddsVolume); - List resultList = new ArrayList<>(); + List resultList = new ArrayList<>(); final File hddsVolumeRootDir = hddsVolume.getHddsRootDir(); Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" + @@ -169,7 +169,7 @@ public CompletableFuture getUpgradeFuture() { if (containerTopDirs != null) { for (File containerTopDir : containerTopDirs) { try { - final List results = + final List results = upgradeSubContainerDir(containerTopDir); resultList.addAll(results); } catch (IOException e) { @@ -202,9 +202,9 @@ public CompletableFuture getUpgradeFuture() { }); } - private List upgradeSubContainerDir( + private List upgradeSubContainerDir( File containerTopDir) throws IOException { - List resultList = new ArrayList<>(); + List resultList = new ArrayList<>(); if (containerTopDir.isDirectory()) { File[] containerDirs = containerTopDir.listFiles(); if (containerDirs != null) { @@ -213,8 +213,8 @@ private List upgradeSubContainerDir( if (containerData != null && ((KeyValueContainerData) containerData) .hasSchema(OzoneConsts.SCHEMA_V2)) { - final UpgradeContainerResult result = - new UpgradeContainerResult(containerData); + final ContainerUpgradeResult result = + new ContainerUpgradeResult(containerData); upgradeContainer(containerData, result); resultList.add(result); } @@ -264,7 +264,7 @@ private ContainerData parseContainerData(File containerDir) { } private void upgradeContainer(ContainerData containerData, - UpgradeContainerResult result) throws IOException { + ContainerUpgradeResult result) throws IOException { final DBStore targetDBStore = dataStore.getStore(); // open container schema v2 rocksdb @@ -314,7 +314,7 @@ private long transferTableData(Table targetTable, } private void rewriteAndBackupContainerDataFile(ContainerData containerData, - UpgradeContainerResult result) throws IOException { + ContainerUpgradeResult result) throws IOException { if (containerData instanceof KeyValueContainerData) { final KeyValueContainerData keyValueContainerData = (KeyValueContainerData) containerData; @@ -372,95 +372,4 @@ public void dbBackup(File dbPath) throws IOException { } } - /** - * This class represents upgrade v2 to v3 container result. 
- */ - public static class UpgradeContainerResult { - private final ContainerData originContainerData; - private ContainerData newContainerData; - private long totalRow = 0L; - private final long startTimeMs = System.currentTimeMillis(); - private long endTimeMs = 0L; - private Status status; - - private String backupContainerFilePath; - private String newContainerFilePath; - - public UpgradeContainerResult( - ContainerData originContainerData) { - this.originContainerData = originContainerData; - this.status = Status.FAIL; - } - - public long getTotalRow() { - return totalRow; - } - - public Status getStatus() { - return status; - } - - public void setNewContainerData( - ContainerData newContainerData) { - this.newContainerData = newContainerData; - } - - public long getCostMs() { - return endTimeMs - startTimeMs; - } - - public ContainerData getOriginContainerData() { - return originContainerData; - } - - public void setBackupContainerFilePath(String backupContainerFilePath) { - this.backupContainerFilePath = backupContainerFilePath; - } - - public void setNewContainerFilePath(String newContainerFilePath) { - this.newContainerFilePath = newContainerFilePath; - } - - public void success(long rowCount) { - this.totalRow = rowCount; - this.endTimeMs = System.currentTimeMillis(); - this.status = Status.SUCCESS; - } - - @Override - public String toString() { - final StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("Result:{"); - stringBuilder.append("containerID="); - stringBuilder.append(originContainerData.getContainerID()); - stringBuilder.append(", originContainerSchemaVersion="); - stringBuilder.append( - ((KeyValueContainerData) originContainerData).getSchemaVersion()); - - if (newContainerData != null) { - stringBuilder.append(", schemaV2ContainerFileBackupPath="); - stringBuilder.append(backupContainerFilePath); - - stringBuilder.append(", newContainerSchemaVersion="); - stringBuilder.append( - ((KeyValueContainerData) newContainerData).getSchemaVersion()); - - stringBuilder.append(", schemaV3ContainerFilePath="); - stringBuilder.append(newContainerFilePath); - } - stringBuilder.append(", totalRow="); - stringBuilder.append(totalRow); - stringBuilder.append(", costMs="); - stringBuilder.append(getCostMs()); - stringBuilder.append(", status="); - stringBuilder.append(status); - stringBuilder.append("}"); - return stringBuilder.toString(); - } - - enum Status { - SUCCESS, - FAIL - } - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java new file mode 100644 index 000000000000..52868d22f022 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
+
+/**
+ * This class contains v2 to v3 container upgrade result.
+ */
+public class VolumeUpgradeResult {
+  private Map<Long, ContainerUpgradeResult> resultMap;
+  private final HddsVolume hddsVolume;
+  private final long startTimeMs = System.currentTimeMillis();
+  private long endTimeMs = 0L;
+  private Exception e = null;
+  private Status status = Status.FAIL;
+  private DatanodeStoreSchemaThreeImpl store;
+
+  public VolumeUpgradeResult(HddsVolume hddsVolume) {
+    this.hddsVolume = hddsVolume;
+  }
+
+  public HddsVolume getHddsVolume() {
+    return hddsVolume;
+  }
+
+  public long getCost() {
+    return endTimeMs - startTimeMs;
+  }
+
+  DatanodeStoreSchemaThreeImpl getDBStore() {
+    return store;
+  }
+
+  void setDBStore(DatanodeStoreSchemaThreeImpl store) {
+    this.store = store;
+  }
+
+  public void setResultList(
+      List<ContainerUpgradeResult> resultList) {
+    resultMap = new HashMap<>();
+    resultList.forEach(res -> resultMap
+        .put(res.getOriginContainerData().getContainerID(), res));
+  }
+
+  public Map<Long, ContainerUpgradeResult> getResultMap() {
+    return resultMap;
+  }
+
+  public boolean isSuccess() {
+    return this.status == Status.SUCCESS;
+  }
+
+  public void success() {
+    this.endTimeMs = System.currentTimeMillis();
+    this.status = Status.SUCCESS;
+  }
+
+  public void fail(Exception exception) {
+    this.endTimeMs = System.currentTimeMillis();
+    this.status = Status.FAIL;
+    this.e = exception;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder stringBuilder = new StringBuilder();
+    stringBuilder.append("Result:{");
+    stringBuilder.append("hddsRootDir=");
+    stringBuilder.append(getHddsVolume().getHddsRootDir());
+    stringBuilder.append(", resultList=");
+    AtomicLong total = new AtomicLong(0L);
+    if (resultMap != null) {
+      resultMap.forEach((k, r) -> {
+        stringBuilder.append(r.toString());
+        stringBuilder.append("\n");
+        total.addAndGet(r.getTotalRow());
+      });
+    }
+    stringBuilder.append(", totalRow=");
+    stringBuilder.append(total.get());
+    stringBuilder.append(", costMs=");
+    stringBuilder.append(getCost());
+    stringBuilder.append(", status=");
+    stringBuilder.append(status);
+    if (e != null) {
+      stringBuilder.append(", Exception=");
+      stringBuilder.append(e);
+    }
+    stringBuilder.append('}');
+    return stringBuilder.toString();
+  }
+
+  enum Status {
+    SUCCESS,
+    FAIL
+  }
+}
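One detail worth noting in the toString() above: a lambda passed to Map.forEach can only capture effectively-final locals, so a plain long cannot be incremented inside it; the AtomicLong serves purely as a mutable row-count accumulator. A tiny self-contained illustration, with made-up container IDs and row counts:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public final class RowTotalSketch {
  public static void main(String[] args) {
    Map<Long, Long> rowsByContainer = new HashMap<>();
    rowsByContainer.put(1L, 120L);
    rowsByContainer.put(2L, 80L);

    // effectively-final holder; a bare "long total" would not compile here
    AtomicLong total = new AtomicLong(0L);
    rowsByContainer.forEach((id, rows) -> total.addAndGet(rows));
    System.out.println("totalRow=" + total.get()); // prints totalRow=200
  }
}

diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java
index 8ef2699e2bfb..9663354a34e7 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java
@@ -158,7 +158,7 @@ public void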
testUpgrade() throws IOException {
     shutdownAllVolume();
 
-    final List<UpgradeManager.Result> results =
+    final List<VolumeUpgradeResult> results =
         UpgradeManager.run(CONF,
             StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()));
 
@@ -266,10 +266,10 @@ public void shutdownAllVolume() {
   }
 
   private void checkV3MetaData(Map<KeyValueContainerData,
-      Map<String, BlockData>> blockDataMap,
-      List<UpgradeManager.Result> results) throws IOException {
-    Map<Long, UpgradeManager.Result> volumeResults = new HashMap<>();
+      Map<String, BlockData>> blockDataMap,
+      List<VolumeUpgradeResult> results) throws IOException {
+    Map<Long, VolumeUpgradeResult> volumeResults = new HashMap<>();
 
-    for (UpgradeManager.Result result : results) {
+    for (VolumeUpgradeResult result : results) {
       result.getResultMap().forEach((k, v) -> volumeResults.put(k, result));
     }

From dd46318d4c54fc6d745edcedb87b27ab1aff0a13 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Sat, 8 Mar 2025 08:50:05 +0100
Subject: [PATCH 08/29] remove unused method

---
 .../ozone/repair/datanode/schemaupgrade/UpgradeUtils.java | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
index 14ca387486a8..db384ff8a272 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
@@ -17,8 +17,6 @@
 
 package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
 
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
-
 import com.google.common.base.Preconditions;
 import java.io.File;
 import java.io.IOException;
@@ -60,10 +58,6 @@ public static DatanodeDetails getDatanodeDetails(OzoneConfiguration conf)
     return ContainerUtils.readDatanodeDetailsFrom(idFile);
   }
 
-  public static File getContainerDBPath(HddsVolume volume) {
-    return new File(volume.getDbParentDir(), CONTAINER_DB_NAME);
-  }
-
   public static File getVolumeUpgradeCompleteFile(HddsVolume volume) {
     return new File(volume.getHddsRootDir(),
         UpgradeTask.UPGRADE_COMPLETE_FILE_NAME);

From f4118b27ec2fc4b04cca61b4ab739abe5cf71d7d Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Sat, 8 Mar 2025 08:51:09 +0100
Subject: [PATCH 09/29] move utils from UpgradeChecker to UpgradeUtils

---
 .../schemaupgrade/UpgradeChecker.java         | 74 -------------------
 .../UpgradeContainerSchemaSubcommand.java     |  6 +-
 .../datanode/schemaupgrade/UpgradeUtils.java  | 40 ++++++++++
 3 files changed, 43 insertions(+), 77 deletions(-)
 delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java
deleted file mode 100644
index 6346a2a88e9a..000000000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeChecker.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
-import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
-import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
-import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-
-/**
- * This is the handler that process container upgrade checker.
- */
-public class UpgradeChecker {
-
-  public static Pair<HDDSLayoutFeature, HDDSLayoutFeature> getLayoutFeature(
-      DatanodeDetails dnDetail, OzoneConfiguration conf) throws IOException {
-    DatanodeLayoutStorage layoutStorage =
-        new DatanodeLayoutStorage(conf, dnDetail.getUuidString());
-    HDDSLayoutVersionManager layoutVersionManager =
-        new HDDSLayoutVersionManager(layoutStorage.getLayoutVersion());
-
-    final int metadataLayoutVersion =
-        layoutVersionManager.getMetadataLayoutVersion();
-    final HDDSLayoutFeature metadataLayoutFeature =
-        (HDDSLayoutFeature) layoutVersionManager.getFeature(
-            metadataLayoutVersion);
-
-    final int softwareLayoutVersion =
-        layoutVersionManager.getSoftwareLayoutVersion();
-    final HDDSLayoutFeature softwareLayoutFeature =
-        (HDDSLayoutFeature) layoutVersionManager.getFeature(
-            softwareLayoutVersion);
-
-    return Pair.of(softwareLayoutFeature, metadataLayoutFeature);
-  }
-
-  public static List<HddsVolume> getAllVolume(DatanodeDetails detail,
-      OzoneConfiguration configuration) throws IOException {
-    final MutableVolumeSet dataVolumeSet = UpgradeUtils
-        .getHddsVolumes(configuration, StorageVolume.VolumeType.DATA_VOLUME,
-            detail.getUuidString());
-    return StorageVolumeUtil.getHddsVolumesList(dataVolumeSet.getVolumesList());
-  }
-
-  public static boolean isAlreadyUpgraded(HddsVolume hddsVolume) {
-    final File migrateFile =
-        UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume);
-    return migrateFile.exists();
-  }
-}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java
index 4c654ba495df..265d6746f5e5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java
@@ -63,7 +63,7 @@ public void execute() throws Exception {
         UpgradeUtils.getDatanodeDetails(configuration);
 
     Pair<HDDSLayoutFeature, HDDSLayoutFeature> layoutFeature =
-        UpgradeChecker.getLayoutFeature(dnDetail, configuration);
+        UpgradeUtils.getLayoutFeature(dnDetail, configuration);
     final HDDSLayoutFeature softwareLayoutFeature = layoutFeature.getLeft();
     final HDDSLayoutFeature metadataLayoutFeature = layoutFeature.getRight();
     final int needLayoutVersion =
@@ -108,12 +108,12 @@ public void execute() throws Exception {
     }
 
     List<HddsVolume> allVolume =
-        UpgradeChecker.getAllVolume(dnDetail, configuration);
+        UpgradeUtils.getAllVolume(dnDetail, configuration);
 
     Iterator<HddsVolume> volumeIterator = allVolume.iterator();
     while (volumeIterator.hasNext()) {
       HddsVolume hddsVolume = volumeIterator.next();
-      if (UpgradeChecker.isAlreadyUpgraded(hddsVolume)) {
+      if (UpgradeUtils.isAlreadyUpgraded(hddsVolume)) {
         info("Volume " + hddsVolume.getVolumeRootDir() +
             " is already upgraded, skip it.");
         volumeIterator.remove();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
index db384ff8a272..539c6c1ab054 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java
@@ -25,10 +25,16 @@
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.util.Date;
+import java.util.List;
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
@@ -77,4 +83,38 @@ public static boolean createFile(File file) throws IOException {
     return file.exists();
   }
 
+  public static Pair<HDDSLayoutFeature, HDDSLayoutFeature> getLayoutFeature(
+      DatanodeDetails dnDetail, OzoneConfiguration conf) throws IOException {
+    DatanodeLayoutStorage layoutStorage =
+        new DatanodeLayoutStorage(conf, dnDetail.getUuidString());
+    HDDSLayoutVersionManager layoutVersionManager =
+        new HDDSLayoutVersionManager(layoutStorage.getLayoutVersion());
+
+    final int metadataLayoutVersion =
+        layoutVersionManager.getMetadataLayoutVersion();
+    final HDDSLayoutFeature metadataLayoutFeature =
+        (HDDSLayoutFeature) layoutVersionManager.getFeature(
+            metadataLayoutVersion);
+
+    final int softwareLayoutVersion =
+        layoutVersionManager.getSoftwareLayoutVersion();
+    final HDDSLayoutFeature softwareLayoutFeature =
+        (HDDSLayoutFeature) layoutVersionManager.getFeature(
+            softwareLayoutVersion);
+
+    return Pair.of(softwareLayoutFeature, metadataLayoutFeature);
+  }
+
+  public static List<HddsVolume> getAllVolume(DatanodeDetails detail,
+      OzoneConfiguration configuration) throws IOException {
+    final MutableVolumeSet dataVolumeSet = getHddsVolumes(configuration,
+        StorageVolume.VolumeType.DATA_VOLUME, detail.getUuidString());
+    return StorageVolumeUtil.getHddsVolumesList(dataVolumeSet.getVolumesList());
+  }
+
+  public static boolean isAlreadyUpgraded(HddsVolume hddsVolume) {
+    final File migrateFile =
+        getVolumeUpgradeCompleteFile(hddsVolume);
+    return migrateFile.exists();
+  }
 }
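getLayoutFeature returns the pair (software layout feature, metadata layout feature), and the subcommand compares the two against the required layout version before it will rewrite containers. A hedged sketch of such a gate, with a simplified enum standing in for HDDSLayoutFeature; the real check compares explicit layout version numbers rather than this exact predicate:

enum LayoutFeatureSketch { INITIAL_VERSION, DATANODE_SCHEMA_V2, DATANODE_SCHEMA_V3 }

public final class LayoutGateSketch {

  static boolean safeToRewriteContainers(LayoutFeatureSketch software,
      LayoutFeatureSketch metadata) {
    // both the installed software and the on-disk metadata must have
    // reached the schema v3 feature before containers may be rewritten
    return software.compareTo(LayoutFeatureSketch.DATANODE_SCHEMA_V3) >= 0
        && metadata.compareTo(LayoutFeatureSketch.DATANODE_SCHEMA_V3) >= 0;
  }

  public static void main(String[] args) {
    // metadata not yet finalized: refuse the upgrade
    System.out.println(safeToRewriteContainers(
        LayoutFeatureSketch.DATANODE_SCHEMA_V3,
        LayoutFeatureSketch.DATANODE_SCHEMA_V2)); // prints false
  }
}

From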
603b56f527e7a8b9fd0dc91eb4854d341d6b4629 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 08:53:51 +0100 Subject: [PATCH 10/29] reduce visibility --- .../repair/datanode/schemaupgrade/ContainerUpgradeResult.java | 2 +- .../ozone/repair/datanode/schemaupgrade/UpgradeManager.java | 4 ++-- .../ozone/repair/datanode/schemaupgrade/UpgradeTask.java | 2 +- .../ozone/repair/datanode/schemaupgrade/UpgradeUtils.java | 2 +- .../repair/datanode/schemaupgrade/VolumeUpgradeResult.java | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java index 32b13dd60a87..1f3e95cdf908 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java @@ -23,7 +23,7 @@ /** * This class represents upgrade v2 to v3 container result. */ -public class ContainerUpgradeResult { +class ContainerUpgradeResult { private final ContainerData originContainerData; private ContainerData newContainerData; private long totalRow = 0L; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java index 9e10b1729812..c3356f441fcf 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java @@ -31,12 +31,12 @@ /** * This class manages v2 to v3 container upgrade. */ -public class UpgradeManager { +class UpgradeManager { public static final Logger LOG = LoggerFactory.getLogger(UpgradeManager.class); - public static List run(OzoneConfiguration configuration, + static List run(OzoneConfiguration configuration, List volumes) throws IOException { List results = new ArrayList<>(); Map> volumeFutures = new HashMap<>(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java index ef7d7a4ffd78..44c245287eb6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java @@ -60,7 +60,7 @@ /** * This class implements the v2 to v3 container upgrade process. */ -public class UpgradeTask { +class UpgradeTask { public static final Logger LOG = LoggerFactory.getLogger(UpgradeTask.class); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java index 539c6c1ab054..2ea6db0e027e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java @@ -42,7 +42,7 @@ /** * Utils functions to help upgrade v2 to v3 container functions. 
*/ -public final class UpgradeUtils { +final class UpgradeUtils { /** Never constructed. **/ private UpgradeUtils() { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java index 52868d22f022..c78718ce9fb2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java @@ -27,7 +27,7 @@ /** * This class contains v2 to v3 container upgrade result. */ -public class VolumeUpgradeResult { +class VolumeUpgradeResult { private Map resultMap; private final HddsVolume hddsVolume; private final long startTimeMs = System.currentTimeMillis(); From b803752ea2a26ce50f069829c99bbd36343d7a5d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 08:57:10 +0100 Subject: [PATCH 11/29] merge UpgradeManager.run into UpgradeContainerSchemaSubcommand --- .../UpgradeContainerSchemaSubcommand.java | 47 +++++++++++- .../schemaupgrade/UpgradeManager.java | 74 ------------------- ...TestUpgradeContainerSchemaSubcommand.java} | 6 +- 3 files changed, 49 insertions(+), 78 deletions(-) delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java rename hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/{TestUpgradeManager.java => TestUpgradeContainerSchemaSubcommand.java} (98%) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java index 265d6746f5e5..0baf7009d535 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java @@ -19,8 +19,13 @@ import com.google.common.base.Strings; import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -31,6 +36,8 @@ import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.repair.RepairTool; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -45,11 +52,49 @@ versionProvider = HddsVersionProvider.class) public class UpgradeContainerSchemaSubcommand extends RepairTool { + public static final Logger LOG = + LoggerFactory.getLogger(UpgradeContainerSchemaSubcommand.class); + @CommandLine.Option(names = {"--volume"}, required = false, description = "volume path") private String volume; + static List run(OzoneConfiguration configuration, + List volumes) throws IOException { + List results = new ArrayList<>(); + Map> volumeFutures = new HashMap<>(); + long startTime = System.currentTimeMillis(); + + LOG.info("Start to upgrade {} 
volume(s)", volumes.size());
+    for (HddsVolume hddsVolume : volumes) {
+      final UpgradeTask task =
+          new UpgradeTask(configuration, hddsVolume);
+      final CompletableFuture<VolumeUpgradeResult> future = task.getUpgradeFuture();
+      volumeFutures.put(hddsVolume, future);
+    }
+
+    for (Map.Entry<HddsVolume, CompletableFuture<VolumeUpgradeResult>> entry :
+        volumeFutures.entrySet()) {
+      final HddsVolume hddsVolume = entry.getKey();
+      final CompletableFuture<VolumeUpgradeResult> volumeFuture = entry.getValue();
+
+      try {
+        final VolumeUpgradeResult result = volumeFuture.get();
+        results.add(result);
+        LOG.info("Finish upgrading containers on volume {}, {}",
+            hddsVolume.getVolumeRootDir(), result.toString());
+      } catch (Exception e) {
+        LOG.error("Failed to upgrade containers on volume {}",
+            hddsVolume.getVolumeRootDir(), e);
+      }
+    }
+
+    LOG.info("It took {}ms to finish all volume upgrade.",
+        (System.currentTimeMillis() - startTime));
+    return results;
+  }
+
   @Override
   protected Component serviceToBeOffline() {
     return Component.DATANODE;
@@ -126,6 +171,6 @@ public void execute() throws Exception {
     }
 
     // do upgrade
-    UpgradeManager.run(configuration, allVolume);
+    run(configuration, allVolume);
   }
 }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java
deleted file mode 100644
index c3356f441fcf..000000000000
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeManager.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.repair.datanode.schemaupgrade;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
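With UpgradeManager.run merged into the command class, the whole upgrade entry point becomes a single picocli component. A stripped-down sketch of that shape follows; the names and wiring are illustrative only, and the real class extends RepairTool rather than implementing Runnable:

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "upgrade-container-schema",
    description = "Offline upgrade of container metadata from schema v2 to v3")
public class UpgradeCommandSketch implements Runnable {

  @Option(names = {"--volume"}, description = "upgrade only this volume path")
  private String volume;

  @Override
  public void run() {
    // 1. resolve the volume list (all data volumes, or just --volume)
    // 2. fan out one upgrade task per volume and collect the results
    System.out.println(volume == null
        ? "upgrading all volumes" : "upgrading " + volume);
  }

  public static void main(String[] args) {
    System.exit(new CommandLine(new UpgradeCommandSketch()).execute(args));
  }
}

-/**
- * This class manages v2 to v3 container upgrade.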
- */ -class UpgradeManager { - - public static final Logger LOG = - LoggerFactory.getLogger(UpgradeManager.class); - - static List run(OzoneConfiguration configuration, - List volumes) throws IOException { - List results = new ArrayList<>(); - Map> volumeFutures = new HashMap<>(); - long startTime = System.currentTimeMillis(); - - LOG.info("Start to upgrade {} volume(s)", volumes.size()); - for (HddsVolume hddsVolume : volumes) { - final UpgradeTask task = - new UpgradeTask(configuration, hddsVolume); - final CompletableFuture future = task.getUpgradeFuture(); - volumeFutures.put(hddsVolume, future); - } - - for (Map.Entry> entry : - volumeFutures.entrySet()) { - final HddsVolume hddsVolume = entry.getKey(); - final CompletableFuture volumeFuture = entry.getValue(); - - try { - final VolumeUpgradeResult result = volumeFuture.get(); - results.add(result); - LOG.info("Finish upgrading containers on volume {}, {}", - hddsVolume.getVolumeRootDir(), result.toString()); - } catch (Exception e) { - LOG.error("Failed to upgrade containers on volume {}", - hddsVolume.getVolumeRootDir(), e); - } - } - - LOG.info("It took {}ms to finish all volume upgrade.", - (System.currentTimeMillis() - startTime)); - return results; - } - -} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchemaSubcommand.java similarity index 98% rename from hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchemaSubcommand.java index 9663354a34e7..795ef90e2cde 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeManager.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchemaSubcommand.java @@ -73,9 +73,9 @@ import org.junit.jupiter.api.io.TempDir; /** - * Tests for UpgradeManager class. + * Tests for {@link UpgradeContainerSchemaSubcommand} class. 
*/ -public class TestUpgradeManager { +class TestUpgradeContainerSchemaSubcommand { private static final String SCM_ID = UUID.randomUUID().toString(); private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -159,7 +159,7 @@ public void testUpgrade() throws IOException { shutdownAllVolume(); final List results = - UpgradeManager.run(CONF, + UpgradeContainerSchemaSubcommand.run(CONF, StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); checkV3MetaData(keyValueContainerBlockDataMap, results); From f144ebfb3b4eca981d49d65939deeb84e7d6e571 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 08:57:49 +0100 Subject: [PATCH 12/29] rename to UpgradeContainerSchema --- .../apache/hadoop/ozone/repair/datanode/DatanodeRepair.java | 4 ++-- ...nerSchemaSubcommand.java => UpgradeContainerSchema.java} | 4 ++-- ...chemaSubcommand.java => TestUpgradeContainerSchema.java} | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/{UpgradeContainerSchemaSubcommand.java => UpgradeContainerSchema.java} (97%) rename hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/{TestUpgradeContainerSchemaSubcommand.java => TestUpgradeContainerSchema.java} (98%) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java index 57e0b50b7e2d..27250015fa48 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/DatanodeRepair.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.repair.datanode; import org.apache.hadoop.hdds.cli.RepairSubcommand; -import org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeContainerSchemaSubcommand; +import org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeContainerSchema; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -27,7 +27,7 @@ */ @CommandLine.Command(name = "datanode", subcommands = { - UpgradeContainerSchemaSubcommand.class, + UpgradeContainerSchema.class, }, description = "Tools to repair Datanode") @MetaInfServices(RepairSubcommand.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java similarity index 97% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 0baf7009d535..573958249288 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchemaSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -50,10 +50,10 @@ "for this datanode.", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class UpgradeContainerSchemaSubcommand extends RepairTool { +public class UpgradeContainerSchema extends RepairTool { public static final Logger LOG = - LoggerFactory.getLogger(UpgradeContainerSchemaSubcommand.class); + 
LoggerFactory.getLogger(UpgradeContainerSchema.class); @CommandLine.Option(names = {"--volume"}, required = false, diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchemaSubcommand.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java similarity index 98% rename from hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchemaSubcommand.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java index 795ef90e2cde..4fbff09387ed 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchemaSubcommand.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java @@ -73,9 +73,9 @@ import org.junit.jupiter.api.io.TempDir; /** - * Tests for {@link UpgradeContainerSchemaSubcommand} class. + * Tests for {@link UpgradeContainerSchema} class. */ -class TestUpgradeContainerSchemaSubcommand { +class TestUpgradeContainerSchema { private static final String SCM_ID = UUID.randomUUID().toString(); private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -159,7 +159,7 @@ public void testUpgrade() throws IOException { shutdownAllVolume(); final List results = - UpgradeContainerSchemaSubcommand.run(CONF, + UpgradeContainerSchema.run(CONF, StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); checkV3MetaData(keyValueContainerBlockDataMap, results); From 6ca96714dd9feb209ea7ce62fac4692b7426f485 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:06:37 +0100 Subject: [PATCH 13/29] move UpgradeTask into UpgradeContainerSchema --- .../schemaupgrade/UpgradeContainerSchema.java | 337 +++++++++++++++- .../datanode/schemaupgrade/UpgradeTask.java | 375 ------------------ .../datanode/schemaupgrade/UpgradeUtils.java | 16 +- .../TestUpgradeContainerSchema.java | 2 +- 4 files changed, 349 insertions(+), 381 deletions(-) delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 573958249288..a376f83b78d7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -17,24 +17,53 @@ package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; +import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.BACKUP_CONTAINER_DATA_FILE_SUFFIX; +import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.COLUMN_FAMILIES_NAME; + +import com.google.common.base.Preconditions; import com.google.common.base.Strings; import java.io.File; import java.io.IOException; +import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import 
java.util.concurrent.CompletableFuture; +import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Storage; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; +import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; +import org.apache.hadoop.ozone.container.common.utils.RawDB; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; +import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.container.metadata.DatanodeStore; +import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; import org.apache.hadoop.ozone.repair.RepairTool; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,7 +89,7 @@ public class UpgradeContainerSchema extends RepairTool { description = "volume path") private String volume; - static List run(OzoneConfiguration configuration, + List run(OzoneConfiguration configuration, List volumes) throws IOException { List results = new ArrayList<>(); Map> volumeFutures = new HashMap<>(); @@ -173,4 +202,310 @@ public void execute() throws Exception { // do upgrade run(configuration, allVolume); } + + /** + * This class implements the v2 to v3 container upgrade process. 
+ */ + private class UpgradeTask { + + private final ConfigurationSource config; + private final HddsVolume hddsVolume; + private DatanodeStoreSchemaThreeImpl dataStore; + + public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) { + this.config = config; + this.hddsVolume = hddsVolume; + } + + public CompletableFuture getUpgradeFuture() { + final File lockFile = UpgradeUtils.getVolumeUpgradeLockFile(hddsVolume); + + return CompletableFuture.supplyAsync(() -> { + + final VolumeUpgradeResult result = + new VolumeUpgradeResult(hddsVolume); + + List resultList = new ArrayList<>(); + final File hddsVolumeRootDir = hddsVolume.getHddsRootDir(); + + Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" + + "cannot be null"); + + // check CID directory and current file + File clusterIDDir = new File(hddsVolume.getStorageDir(), + hddsVolume.getClusterID()); + if (!clusterIDDir.exists() || !clusterIDDir.isDirectory()) { + result.fail(new Exception("Volume " + hddsVolumeRootDir + + " is in an inconsistent state. Expected " + + "clusterID directory " + clusterIDDir + + " is not found or not a directory.")); + return result; + } + File currentDir = new File(clusterIDDir, Storage.STORAGE_DIR_CURRENT); + if (!currentDir.exists() || !currentDir.isDirectory()) { + result.fail(new Exception( + "Current dir " + currentDir + " is not found or not a directory," + + " skip upgrade.")); + return result; + } + + try { + // create lock file + if (!lockFile.createNewFile()) { + result.fail(new Exception("Upgrade lock file already exists " + + lockFile.getAbsolutePath() + ", skip upgrade.")); + return result; + } + } catch (IOException e) { + result.fail(new Exception("Failed to create upgrade lock file " + + lockFile.getAbsolutePath() + ", skip upgrade.")); + return result; + } + + // check complete file again + final File completeFile = + UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume); + if (completeFile.exists()) { + result.fail(new Exception("Upgrade complete file already exists " + + completeFile.getAbsolutePath() + ", skip upgrade.")); + if (!lockFile.delete()) { + LOG.warn("Failed to delete upgrade lock file {}.", lockFile); + } + return result; + } + + // backup DB directory + final File volumeDBPath; + try { + volumeDBPath = getVolumeDBPath(hddsVolume); + dbBackup(volumeDBPath); + } catch (IOException e) { + result.fail(new Exception(e.getMessage() + ", skip upgrade.")); + return result; + } + + // load DB store + try { + hddsVolume.loadDbStore(false); + RawDB db = DatanodeStoreCache.getInstance().getDB( + volumeDBPath.getAbsolutePath(), config); + dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore(); + result.setDBStore(dataStore); + } catch (IOException e) { + result.fail(new Exception( + "Failed to load db for volume " + hddsVolume.getVolumeRootDir() + + " for " + e.getMessage() + ", skip upgrade.")); + return result; + } + + LOG.info("Start to upgrade containers on volume {}", + hddsVolume.getVolumeRootDir()); + File[] containerTopDirs = currentDir.listFiles(); + if (containerTopDirs != null) { + for (File containerTopDir : containerTopDirs) { + try { + final List results = + upgradeSubContainerDir(containerTopDir); + resultList.addAll(results); + } catch (IOException e) { + result.fail(e); + return result; + } + } + } + + result.setResultList(resultList); + result.success(); + return result; + }).whenComplete((r, e) -> { + final File file = + UpgradeUtils.getVolumeUpgradeCompleteFile(r.getHddsVolume()); + // create a flag file + if (e == null && r.isSuccess()) { + 
try { + UpgradeUtils.createFile(file); + } catch (IOException ioe) { + LOG.warn("Failed to create upgrade complete file {}.", file, ioe); + } + } + if (lockFile.exists()) { + boolean deleted = lockFile.delete(); + if (!deleted) { + LOG.warn("Failed to delete upgrade lock file {}.", file); + } + } + }); + } + + private List upgradeSubContainerDir( + File containerTopDir) throws IOException { + List resultList = new ArrayList<>(); + if (containerTopDir.isDirectory()) { + File[] containerDirs = containerTopDir.listFiles(); + if (containerDirs != null) { + for (File containerDir : containerDirs) { + final ContainerData containerData = parseContainerData(containerDir); + if (containerData != null && + ((KeyValueContainerData) containerData) + .hasSchema(OzoneConsts.SCHEMA_V2)) { + final ContainerUpgradeResult result = + new ContainerUpgradeResult(containerData); + upgradeContainer(containerData, result); + resultList.add(result); + } + } + } + } + return resultList; + } + + private ContainerData parseContainerData(File containerDir) { + try { + File containerFile = ContainerUtils.getContainerFile(containerDir); + long containerID = ContainerUtils.getContainerID(containerDir); + if (!containerFile.exists()) { + LOG.error("Missing .container file: {}.", containerDir); + return null; + } + try { + ContainerData containerData = + ContainerDataYaml.readContainerFile(containerFile); + if (containerID != containerData.getContainerID()) { + LOG.error("ContainerID in file {} mismatch with expected {}.", + containerFile, containerID); + return null; + } + if (containerData.getContainerType().equals( + ContainerProtos.ContainerType.KeyValueContainer) && + containerData instanceof KeyValueContainerData) { + KeyValueContainerData kvContainerData = + (KeyValueContainerData) containerData; + containerData.setVolume(hddsVolume); + KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); + return kvContainerData; + } else { + LOG.error("Container is not KeyValueContainer type: {}.", + containerDir); + return null; + } + } catch (IOException ex) { + LOG.error("Failed to parse ContainerFile: {}.", containerFile, ex); + return null; + } + } catch (Throwable e) { + LOG.error("Failed to load container: {}.", containerDir, e); + return null; + } + } + + private void upgradeContainer(ContainerData containerData, + ContainerUpgradeResult result) throws IOException { + final DBStore targetDBStore = dataStore.getStore(); + + // open container schema v2 rocksdb + final DatanodeStore dbStore = BlockUtils + .getUncachedDatanodeStore((KeyValueContainerData) containerData, config, + true); + final DBStore sourceDBStore = dbStore.getStore(); + + long total = 0L; + for (String tableName : COLUMN_FAMILIES_NAME) { + total += transferTableData(targetDBStore, sourceDBStore, tableName, + containerData); + } + + rewriteAndBackupContainerDataFile(containerData, result); + result.success(total); + } + + private long transferTableData(DBStore targetDBStore, + DBStore sourceDBStore, String tableName, ContainerData containerData) + throws IOException { + final Table deleteTransactionTable = + sourceDBStore.getTable(tableName); + final Table targetDeleteTransactionTable = + targetDBStore.getTable(tableName); + return transferTableData(targetDeleteTransactionTable, + deleteTransactionTable, containerData); + } + + private long transferTableData(Table targetTable, + Table sourceTable, ContainerData containerData) + throws IOException { + long count = 0; + try (TableIterator> + iter = sourceTable.iterator()) { + while 
(iter.hasNext()) { + count++; + Table.KeyValue next = iter.next(); + String key = DatanodeSchemaThreeDBDefinition + .getContainerKeyPrefix(containerData.getContainerID()) + + StringUtils.bytes2String(next.getKey()); + targetTable + .put(FixedLengthStringCodec.string2Bytes(key), next.getValue()); + } + } + return count; + } + + private void rewriteAndBackupContainerDataFile(ContainerData containerData, + ContainerUpgradeResult result) throws IOException { + if (containerData instanceof KeyValueContainerData) { + final KeyValueContainerData keyValueContainerData = + (KeyValueContainerData) containerData; + + final KeyValueContainerData copyContainerData = + new KeyValueContainerData(keyValueContainerData); + + copyContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V3); + copyContainerData.setState(keyValueContainerData.getState()); + copyContainerData.setVolume(keyValueContainerData.getVolume()); + + final File originContainerFile = KeyValueContainer + .getContainerFile(keyValueContainerData.getMetadataPath(), + keyValueContainerData.getContainerID()); + + final File bakFile = new File(keyValueContainerData.getMetadataPath(), + keyValueContainerData.getContainerID() + + BACKUP_CONTAINER_DATA_FILE_SUFFIX); + + // backup v2 container data file + NativeIO.renameTo(originContainerFile, bakFile); + result.setBackupContainerFilePath(bakFile.getAbsolutePath()); + + // gen new v3 container data file + ContainerDataYaml.createContainerFile( + ContainerProtos.ContainerType.KeyValueContainer, + copyContainerData, originContainerFile); + + result.setNewContainerData(copyContainerData); + result.setNewContainerFilePath(originContainerFile.getAbsolutePath()); + } + } + + private File getVolumeDBPath(HddsVolume volume) throws IOException { + File clusterIdDir = new File(volume.getStorageDir(), volume.getClusterID()); + File storageIdDir = new File(clusterIdDir, volume.getStorageID()); + File containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME); + if (containerDBPath.exists() && containerDBPath.isDirectory()) { + return containerDBPath; + } else { + throw new IOException("DB " + containerDBPath + + " doesn't exist or is not a directory"); + } + } + + private void dbBackup(File dbPath) throws IOException { + final File backup = new File(dbPath.getParentFile(), + new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date()) + + "-" + dbPath.getName() + ".backup"); + if (backup.exists()) { + throw new IOException("Backup dir " + backup + "already exists"); + } else { + FileUtils.copyDirectory(dbPath, backup, true); + System.out.println("DB " + dbPath + " is backup to " + backup); + } + } + + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java deleted file mode 100644 index 44c245287eb6..000000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeTask.java +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; - -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; - -import com.google.common.base.Preconditions; -import java.io.File; -import java.io.IOException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; -import org.apache.hadoop.ozone.container.common.utils.RawDB; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; -import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; -import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class implements the v2 to v3 container upgrade process. 
- */ -class UpgradeTask { - - public static final Logger LOG = - LoggerFactory.getLogger(UpgradeTask.class); - - private final ConfigurationSource config; - private final HddsVolume hddsVolume; - private DatanodeStoreSchemaThreeImpl dataStore; - - private static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup"; - public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete"; - public static final String UPGRADE_LOCK_FILE_NAME = "upgrade.lock"; - - private static final Set COLUMN_FAMILIES_NAME = - (new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration())) - .getMap().keySet(); - - public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) { - this.config = config; - this.hddsVolume = hddsVolume; - } - - public CompletableFuture getUpgradeFuture() { - final File lockFile = UpgradeUtils.getVolumeUpgradeLockFile(hddsVolume); - - return CompletableFuture.supplyAsync(() -> { - - final VolumeUpgradeResult result = - new VolumeUpgradeResult(hddsVolume); - - List resultList = new ArrayList<>(); - final File hddsVolumeRootDir = hddsVolume.getHddsRootDir(); - - Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" + - "cannot be null"); - - // check CID directory and current file - File clusterIDDir = new File(hddsVolume.getStorageDir(), - hddsVolume.getClusterID()); - if (!clusterIDDir.exists() || !clusterIDDir.isDirectory()) { - result.fail(new Exception("Volume " + hddsVolumeRootDir + - " is in an inconsistent state. Expected " + - "clusterID directory " + clusterIDDir + - " is not found or not a directory.")); - return result; - } - File currentDir = new File(clusterIDDir, Storage.STORAGE_DIR_CURRENT); - if (!currentDir.exists() || !currentDir.isDirectory()) { - result.fail(new Exception( - "Current dir " + currentDir + " is not found or not a directory," - + " skip upgrade.")); - return result; - } - - try { - // create lock file - if (!lockFile.createNewFile()) { - result.fail(new Exception("Upgrade lock file already exists " + - lockFile.getAbsolutePath() + ", skip upgrade.")); - return result; - } - } catch (IOException e) { - result.fail(new Exception("Failed to create upgrade lock file " + - lockFile.getAbsolutePath() + ", skip upgrade.")); - return result; - } - - // check complete file again - final File completeFile = - UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume); - if (completeFile.exists()) { - result.fail(new Exception("Upgrade complete file already exists " + - completeFile.getAbsolutePath() + ", skip upgrade.")); - if (!lockFile.delete()) { - LOG.warn("Failed to delete upgrade lock file {}.", lockFile); - } - return result; - } - - // backup DB directory - final File volumeDBPath; - try { - volumeDBPath = getVolumeDBPath(hddsVolume); - dbBackup(volumeDBPath); - } catch (IOException e) { - result.fail(new Exception(e.getMessage() + ", skip upgrade.")); - return result; - } - - // load DB store - try { - hddsVolume.loadDbStore(false); - RawDB db = DatanodeStoreCache.getInstance().getDB( - volumeDBPath.getAbsolutePath(), config); - dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore(); - result.setDBStore(dataStore); - } catch (IOException e) { - result.fail(new Exception( - "Failed to load db for volume " + hddsVolume.getVolumeRootDir() + - " for " + e.getMessage() + ", skip upgrade.")); - return result; - } - - LOG.info("Start to upgrade containers on volume {}", - hddsVolume.getVolumeRootDir()); - File[] containerTopDirs = currentDir.listFiles(); - if (containerTopDirs != null) { - for (File containerTopDir : 
containerTopDirs) { - try { - final List results = - upgradeSubContainerDir(containerTopDir); - resultList.addAll(results); - } catch (IOException e) { - result.fail(e); - return result; - } - } - } - - result.setResultList(resultList); - result.success(); - return result; - }).whenComplete((r, e) -> { - final File file = - UpgradeUtils.getVolumeUpgradeCompleteFile(r.getHddsVolume()); - // create a flag file - if (e == null && r.isSuccess()) { - try { - UpgradeUtils.createFile(file); - } catch (IOException ioe) { - LOG.warn("Failed to create upgrade complete file {}.", file, ioe); - } - } - if (lockFile.exists()) { - boolean deleted = lockFile.delete(); - if (!deleted) { - LOG.warn("Failed to delete upgrade lock file {}.", file); - } - } - }); - } - - private List upgradeSubContainerDir( - File containerTopDir) throws IOException { - List resultList = new ArrayList<>(); - if (containerTopDir.isDirectory()) { - File[] containerDirs = containerTopDir.listFiles(); - if (containerDirs != null) { - for (File containerDir : containerDirs) { - final ContainerData containerData = parseContainerData(containerDir); - if (containerData != null && - ((KeyValueContainerData) containerData) - .hasSchema(OzoneConsts.SCHEMA_V2)) { - final ContainerUpgradeResult result = - new ContainerUpgradeResult(containerData); - upgradeContainer(containerData, result); - resultList.add(result); - } - } - } - } - return resultList; - } - - private ContainerData parseContainerData(File containerDir) { - try { - File containerFile = ContainerUtils.getContainerFile(containerDir); - long containerID = ContainerUtils.getContainerID(containerDir); - if (!containerFile.exists()) { - LOG.error("Missing .container file: {}.", containerDir); - return null; - } - try { - ContainerData containerData = - ContainerDataYaml.readContainerFile(containerFile); - if (containerID != containerData.getContainerID()) { - LOG.error("ContainerID in file {} mismatch with expected {}.", - containerFile, containerID); - return null; - } - if (containerData.getContainerType().equals( - ContainerProtos.ContainerType.KeyValueContainer) && - containerData instanceof KeyValueContainerData) { - KeyValueContainerData kvContainerData = - (KeyValueContainerData) containerData; - containerData.setVolume(hddsVolume); - KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); - return kvContainerData; - } else { - LOG.error("Container is not KeyValueContainer type: {}.", - containerDir); - return null; - } - } catch (IOException ex) { - LOG.error("Failed to parse ContainerFile: {}.", containerFile, ex); - return null; - } - } catch (Throwable e) { - LOG.error("Failed to load container: {}.", containerDir, e); - return null; - } - } - - private void upgradeContainer(ContainerData containerData, - ContainerUpgradeResult result) throws IOException { - final DBStore targetDBStore = dataStore.getStore(); - - // open container schema v2 rocksdb - final DatanodeStore dbStore = BlockUtils - .getUncachedDatanodeStore((KeyValueContainerData) containerData, config, - true); - final DBStore sourceDBStore = dbStore.getStore(); - - long total = 0L; - for (String tableName : COLUMN_FAMILIES_NAME) { - total += transferTableData(targetDBStore, sourceDBStore, tableName, - containerData); - } - - rewriteAndBackupContainerDataFile(containerData, result); - result.success(total); - } - - private long transferTableData(DBStore targetDBStore, - DBStore sourceDBStore, String tableName, ContainerData containerData) - throws IOException { - final Table 
deleteTransactionTable = - sourceDBStore.getTable(tableName); - final Table targetDeleteTransactionTable = - targetDBStore.getTable(tableName); - return transferTableData(targetDeleteTransactionTable, - deleteTransactionTable, containerData); - } - - private long transferTableData(Table targetTable, - Table sourceTable, ContainerData containerData) - throws IOException { - long count = 0; - try (TableIterator> - iter = sourceTable.iterator()) { - while (iter.hasNext()) { - count++; - Table.KeyValue next = iter.next(); - String key = DatanodeSchemaThreeDBDefinition - .getContainerKeyPrefix(containerData.getContainerID()) - + StringUtils.bytes2String(next.getKey()); - targetTable - .put(FixedLengthStringCodec.string2Bytes(key), next.getValue()); - } - } - return count; - } - - private void rewriteAndBackupContainerDataFile(ContainerData containerData, - ContainerUpgradeResult result) throws IOException { - if (containerData instanceof KeyValueContainerData) { - final KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; - - final KeyValueContainerData copyContainerData = - new KeyValueContainerData(keyValueContainerData); - - copyContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V3); - copyContainerData.setState(keyValueContainerData.getState()); - copyContainerData.setVolume(keyValueContainerData.getVolume()); - - final File originContainerFile = KeyValueContainer - .getContainerFile(keyValueContainerData.getMetadataPath(), - keyValueContainerData.getContainerID()); - - final File bakFile = new File(keyValueContainerData.getMetadataPath(), - keyValueContainerData.getContainerID() + - BACKUP_CONTAINER_DATA_FILE_SUFFIX); - - // backup v2 container data file - NativeIO.renameTo(originContainerFile, bakFile); - result.setBackupContainerFilePath(bakFile.getAbsolutePath()); - - // gen new v3 container data file - ContainerDataYaml.createContainerFile( - ContainerProtos.ContainerType.KeyValueContainer, - copyContainerData, originContainerFile); - - result.setNewContainerData(copyContainerData); - result.setNewContainerFilePath(originContainerFile.getAbsolutePath()); - } - } - - public File getVolumeDBPath(HddsVolume volume) throws IOException { - File clusterIdDir = new File(volume.getStorageDir(), volume.getClusterID()); - File storageIdDir = new File(clusterIdDir, volume.getStorageID()); - File containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME); - if (containerDBPath.exists() && containerDBPath.isDirectory()) { - return containerDBPath; - } else { - throw new IOException("DB " + containerDBPath + - " doesn't exist or is not a directory"); - } - } - - public void dbBackup(File dbPath) throws IOException { - final File backup = new File(dbPath.getParentFile(), - new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date()) + - "-" + dbPath.getName() + ".backup"); - if (backup.exists()) { - throw new IOException("Backup dir " + backup + "already exists"); - } else { - FileUtils.copyDirectory(dbPath, backup, true); - System.out.println("DB " + dbPath + " is backup to " + backup); - } - } - -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java index 2ea6db0e027e..7a9d569cd8c2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java @@ -26,6 +26,7 @@ import java.nio.file.Files; import java.util.Date; import java.util.List; +import java.util.Set; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -38,12 +39,21 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; +import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition; /** * Utils functions to help upgrade v2 to v3 container functions. */ final class UpgradeUtils { + public static final Set COLUMN_FAMILIES_NAME = + (new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration())) + .getMap().keySet(); + + public static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup"; + public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete"; + public static final String UPGRADE_LOCK_FILE_NAME = "upgrade.lock"; + /** Never constructed. **/ private UpgradeUtils() { @@ -65,13 +75,11 @@ public static DatanodeDetails getDatanodeDetails(OzoneConfiguration conf) } public static File getVolumeUpgradeCompleteFile(HddsVolume volume) { - return new File(volume.getHddsRootDir(), - UpgradeTask.UPGRADE_COMPLETE_FILE_NAME); + return new File(volume.getHddsRootDir(), UPGRADE_COMPLETE_FILE_NAME); } public static File getVolumeUpgradeLockFile(HddsVolume volume) { - return new File(volume.getHddsRootDir(), - UpgradeTask.UPGRADE_LOCK_FILE_NAME); + return new File(volume.getHddsRootDir(), UPGRADE_LOCK_FILE_NAME); } public static boolean createFile(File file) throws IOException { diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java index 4fbff09387ed..cabd8293a91a 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java @@ -159,7 +159,7 @@ public void testUpgrade() throws IOException { shutdownAllVolume(); final List results = - UpgradeContainerSchema.run(CONF, + new UpgradeContainerSchema().run(CONF, StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); checkV3MetaData(keyValueContainerBlockDataMap, results); From b2eb311213cfd06b34eb9610d59cd9cd92307013 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:08:17 +0100 Subject: [PATCH 14/29] remove IOException not thrown --- .../repair/datanode/schemaupgrade/UpgradeContainerSchema.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index a376f83b78d7..e58892796464 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -89,8 +89,7 @@ public class UpgradeContainerSchema 
extends RepairTool { description = "volume path") private String volume; - List run(OzoneConfiguration configuration, - List volumes) throws IOException { + List run(OzoneConfiguration configuration, List volumes) { List results = new ArrayList<>(); Map> volumeFutures = new HashMap<>(); long startTime = System.currentTimeMillis(); From bcecb8598f3944c8f93d69bbe2b381c4c55c724b Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:08:36 +0100 Subject: [PATCH 15/29] remove required=false (default) --- .../repair/datanode/schemaupgrade/UpgradeContainerSchema.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index e58892796464..d28579e1019f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -85,7 +85,6 @@ public class UpgradeContainerSchema extends RepairTool { LoggerFactory.getLogger(UpgradeContainerSchema.class); @CommandLine.Option(names = {"--volume"}, - required = false, description = "volume path") private String volume; From be63d2cf50188a738f3aec68e8bf5bc636abedc9 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:09:43 +0100 Subject: [PATCH 16/29] wrap in unmodifiableSet --- .../ozone/repair/datanode/schemaupgrade/UpgradeUtils.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java index 7a9d569cd8c2..e88428bfb3f7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java @@ -24,6 +24,7 @@ import java.io.Writer; import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Set; @@ -46,9 +47,9 @@ */ final class UpgradeUtils { - public static final Set COLUMN_FAMILIES_NAME = - (new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration())) - .getMap().keySet(); + public static final Set COLUMN_FAMILIES_NAME = Collections.unmodifiableSet( + new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration()) + .getMap().keySet()); public static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup"; public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete"; From 9ae895f51fbef46e02b241eb89f71e713f03f1a5 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:10:05 +0100 Subject: [PATCH 17/29] rename COLUMN_FAMILIES_NAME to COLUMN_FAMILY_NAMES --- .../repair/datanode/schemaupgrade/UpgradeContainerSchema.java | 4 ++-- .../ozone/repair/datanode/schemaupgrade/UpgradeUtils.java | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 
d28579e1019f..19e39f99d44f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.BACKUP_CONTAINER_DATA_FILE_SUFFIX; -import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.COLUMN_FAMILIES_NAME; +import static org.apache.hadoop.ozone.repair.datanode.schemaupgrade.UpgradeUtils.COLUMN_FAMILY_NAMES; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -407,7 +407,7 @@ private void upgradeContainer(ContainerData containerData, final DBStore sourceDBStore = dbStore.getStore(); long total = 0L; - for (String tableName : COLUMN_FAMILIES_NAME) { + for (String tableName : COLUMN_FAMILY_NAMES) { total += transferTableData(targetDBStore, sourceDBStore, tableName, containerData); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java index e88428bfb3f7..4cfdb3ed841a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeUtils.java @@ -47,7 +47,7 @@ */ final class UpgradeUtils { - public static final Set COLUMN_FAMILIES_NAME = Collections.unmodifiableSet( + public static final Set COLUMN_FAMILY_NAMES = Collections.unmodifiableSet( new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration()) .getMap().keySet()); From 3f129366f9e39ece88027d683b1a9d0644914112 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:19:57 +0100 Subject: [PATCH 18/29] fix checkstyle --- .../schemaupgrade/ContainerUpgradeResult.java | 6 ++---- .../schemaupgrade/UpgradeContainerSchema.java | 12 ++++++------ .../datanode/schemaupgrade/VolumeUpgradeResult.java | 6 +++--- .../schemaupgrade/TestUpgradeContainerSchema.java | 2 +- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java index 1f3e95cdf908..10b10278a09e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java @@ -29,15 +29,13 @@ class ContainerUpgradeResult { private long totalRow = 0L; private final long startTimeMs = System.currentTimeMillis(); private long endTimeMs = 0L; - private Status status; + private Status status = Status.FAIL; private String backupContainerFilePath; private String newContainerFilePath; - public ContainerUpgradeResult( - ContainerData originContainerData) { + ContainerUpgradeResult(ContainerData originContainerData) { this.originContainerData = originContainerData; - this.status = Status.FAIL; } public long getTotalRow() { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 19e39f99d44f..49792087979f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -210,7 +210,7 @@ private class UpgradeTask { private final HddsVolume hddsVolume; private DatanodeStoreSchemaThreeImpl dataStore; - public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) { + UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume) { this.config = config; this.hddsVolume = hddsVolume; } @@ -275,7 +275,7 @@ public CompletableFuture getUpgradeFuture() { // backup DB directory final File volumeDBPath; try { - volumeDBPath = getVolumeDBPath(hddsVolume); + volumeDBPath = getVolumeDBPath(); dbBackup(volumeDBPath); } catch (IOException e) { result.fail(new Exception(e.getMessage() + ", skip upgrade.")); @@ -288,7 +288,7 @@ public CompletableFuture getUpgradeFuture() { RawDB db = DatanodeStoreCache.getInstance().getDB( volumeDBPath.getAbsolutePath(), config); dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore(); - result.setDBStore(dataStore); + result.setStore(dataStore); } catch (IOException e) { result.fail(new Exception( "Failed to load db for volume " + hddsVolume.getVolumeRootDir() + @@ -481,9 +481,9 @@ private void rewriteAndBackupContainerDataFile(ContainerData containerData, } } - private File getVolumeDBPath(HddsVolume volume) throws IOException { - File clusterIdDir = new File(volume.getStorageDir(), volume.getClusterID()); - File storageIdDir = new File(clusterIdDir, volume.getStorageID()); + private File getVolumeDBPath() throws IOException { + File clusterIdDir = new File(hddsVolume.getStorageDir(), hddsVolume.getClusterID()); + File storageIdDir = new File(clusterIdDir, hddsVolume.getStorageID()); File containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME); if (containerDBPath.exists() && containerDBPath.isDirectory()) { return containerDBPath; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java index c78718ce9fb2..a07adcaf4530 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/VolumeUpgradeResult.java @@ -36,7 +36,7 @@ class VolumeUpgradeResult { private Status status = Status.FAIL; private DatanodeStoreSchemaThreeImpl store; - public VolumeUpgradeResult(HddsVolume hddsVolume) { + VolumeUpgradeResult(HddsVolume hddsVolume) { this.hddsVolume = hddsVolume; } @@ -48,11 +48,11 @@ public long getCost() { return endTimeMs - startTimeMs; } - DatanodeStoreSchemaThreeImpl getDBStore() { + DatanodeStoreSchemaThreeImpl getStore() { return store; } - void setDBStore(DatanodeStoreSchemaThreeImpl store) { + void setStore(DatanodeStoreSchemaThreeImpl store) { this.store = store; } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java index cabd8293a91a..3bb4ff7c16f2 100644 --- 
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java @@ -280,7 +280,7 @@ private void checkV3MetaData(Map blockDataTable = datanodeStoreSchemaThree.getBlockDataTable(); From 4accb8a2cd23957ade715d013754c022a8396f1a Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 09:22:25 +0100 Subject: [PATCH 19/29] use stdout/stderr instead of Logger --- .../schemaupgrade/UpgradeContainerSchema.java | 33 ++++++++----------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 49792087979f..bd77c465e730 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -65,8 +65,6 @@ import org.apache.hadoop.ozone.container.metadata.DatanodeStore; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; import org.apache.hadoop.ozone.repair.RepairTool; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -81,9 +79,6 @@ versionProvider = HddsVersionProvider.class) public class UpgradeContainerSchema extends RepairTool { - public static final Logger LOG = - LoggerFactory.getLogger(UpgradeContainerSchema.class); - @CommandLine.Option(names = {"--volume"}, description = "volume path") private String volume; @@ -93,7 +88,7 @@ List run(OzoneConfiguration configuration, List Map> volumeFutures = new HashMap<>(); long startTime = System.currentTimeMillis(); - LOG.info("Start to upgrade {} volume(s)", volumes.size()); + info("Start to upgrade %s volume(s)", volumes.size()); for (HddsVolume hddsVolume : volumes) { final UpgradeTask task = new UpgradeTask(configuration, hddsVolume); @@ -109,15 +104,15 @@ List run(OzoneConfiguration configuration, List try { final VolumeUpgradeResult result = volumeFuture.get(); results.add(result); - LOG.info("Finish upgrading containers on volume {}, {}", - hddsVolume.getVolumeRootDir(), result.toString()); + info("Finish upgrading containers on volume %s, %s", + hddsVolume.getVolumeRootDir(), result); } catch (Exception e) { - LOG.error("Failed to upgrade containers on volume {}", + error("Failed to upgrade containers on volume %s", hddsVolume.getVolumeRootDir(), e); } } - LOG.info("It took {}ms to finish all volume upgrade.", + info("It took %sms to finish all volume upgrade.", (System.currentTimeMillis() - startTime)); return results; } @@ -267,7 +262,7 @@ public CompletableFuture getUpgradeFuture() { result.fail(new Exception("Upgrade complete file already exists " + completeFile.getAbsolutePath() + ", skip upgrade.")); if (!lockFile.delete()) { - LOG.warn("Failed to delete upgrade lock file {}.", lockFile); + error("Failed to delete upgrade lock file %s.", lockFile); } return result; } @@ -296,7 +291,7 @@ public CompletableFuture getUpgradeFuture() { return result; } - LOG.info("Start to upgrade containers on volume {}", + info("Start to upgrade containers on volume %s", hddsVolume.getVolumeRootDir()); File[] containerTopDirs = 
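// Minimal sketch of the message helpers this patch switches to: %s-style
// formatting straight to stdout/stderr instead of an SLF4J logger. RepairTool's
// real info()/error() live elsewhere; this only illustrates the formatting
// contract the converted call sites rely on.
final class ConsoleMessages {
  private ConsoleMessages() { }

  static void info(String msg, Object... args) {
    System.out.println(format(msg, args));
  }

  static void error(String msg, Object... args) {
    System.err.println(format(msg, args));
  }

  private static String format(String msg, Object[] args) {
    return args != null && args.length > 0 ? String.format(msg, args) : msg;
  }

  public static void main(String[] unused) {
    info("Start to upgrade %s volume(s)", 2);
    error("Failed to delete upgrade lock file %s.", "/data/upgrade.lock");
  }
}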
currentDir.listFiles(); if (containerTopDirs != null) { @@ -323,13 +318,13 @@ public CompletableFuture getUpgradeFuture() { try { UpgradeUtils.createFile(file); } catch (IOException ioe) { - LOG.warn("Failed to create upgrade complete file {}.", file, ioe); + error("Failed to create upgrade complete file %s.", file, ioe); } } if (lockFile.exists()) { boolean deleted = lockFile.delete(); if (!deleted) { - LOG.warn("Failed to delete upgrade lock file {}.", file); + error("Failed to delete upgrade lock file %s.", file); } } }); @@ -362,14 +357,14 @@ private ContainerData parseContainerData(File containerDir) { File containerFile = ContainerUtils.getContainerFile(containerDir); long containerID = ContainerUtils.getContainerID(containerDir); if (!containerFile.exists()) { - LOG.error("Missing .container file: {}.", containerDir); + error("Missing .container file: %s.", containerDir); return null; } try { ContainerData containerData = ContainerDataYaml.readContainerFile(containerFile); if (containerID != containerData.getContainerID()) { - LOG.error("ContainerID in file {} mismatch with expected {}.", + error("ContainerID in file %s mismatch with expected %s.", containerFile, containerID); return null; } @@ -382,16 +377,16 @@ private ContainerData parseContainerData(File containerDir) { KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); return kvContainerData; } else { - LOG.error("Container is not KeyValueContainer type: {}.", + error("Container is not KeyValueContainer type: %s.", containerDir); return null; } } catch (IOException ex) { - LOG.error("Failed to parse ContainerFile: {}.", containerFile, ex); + error("Failed to parse ContainerFile: %s.", containerFile, ex); return null; } } catch (Throwable e) { - LOG.error("Failed to load container: {}.", containerDir, e); + error("Failed to load container: %s.", containerDir, e); return null; } } From 552d61d3cfdb7dcd647e3b66107c0d721a8e239f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Sat, 8 Mar 2025 10:25:20 +0100 Subject: [PATCH 20/29] fix unit test --- .../schemaupgrade/TestUpgradeContainerSchema.java | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java index 3bb4ff7c16f2..d2de6450e9d1 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java @@ -66,6 +66,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; +import org.apache.hadoop.ozone.repair.OzoneRepair; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -88,6 +89,7 @@ class TestUpgradeContainerSchema { private BlockManager blockManager; private FilePerBlockStrategy chunkManager; private ContainerSet containerSet; + private List volumes; @BeforeEach public void setup() throws Exception { @@ -112,7 +114,7 @@ public void setup() throws Exception { null, StorageVolume.VolumeType.DATA_VOLUME, null); // create rocksdb instance in volume dir - final 
List volumes = new ArrayList<>(); + volumes = new ArrayList<>(); for (StorageVolume storageVolume : volumeSet.getVolumesList()) { HddsVolume hddsVolume = (HddsVolume) storageVolume; StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, CONF, null, @@ -158,9 +160,12 @@ public void testUpgrade() throws IOException { shutdownAllVolume(); - final List results = - new UpgradeContainerSchema().run(CONF, - StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())); + UpgradeContainerSchema subject = (UpgradeContainerSchema) new OzoneRepair().getCmd() + .getSubcommands().get("datanode") + .getSubcommands().get("upgrade-container-schema") + .getCommandSpec().userObject(); + + final List results = subject.run(CONF, volumes); checkV3MetaData(keyValueContainerBlockDataMap, results); } From 8e77ce2e8792d5877e9261de9b573a0862b3e945 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Tue, 11 Mar 2025 08:23:47 +0100 Subject: [PATCH 21/29] do not require DN to be IN_MAINTENANCE --- .../schemaupgrade/UpgradeContainerSchema.java | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 48db93e21d74..6a668eee7e33 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -164,16 +163,6 @@ public void execute() throws Exception { configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume); } - final HddsProtos.NodeOperationalState opState = - dnDetail.getPersistedOpState(); - - if (!opState.equals(HddsProtos.NodeOperationalState.IN_MAINTENANCE)) { - error("This command requires the datanode's " + - "NodeOperationalState to be IN_MAINTENANCE, currently is " + - opState); - return; - } - List allVolume = UpgradeUtils.getAllVolume(dnDetail, configuration); From 42818218113d4dd310489c98e097b05e549e0709 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Tue, 11 Mar 2025 08:26:07 +0100 Subject: [PATCH 22/29] use File(File, String) instead of File(String) with concatenation --- .../schemaupgrade/UpgradeContainerSchema.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 6a668eee7e33..15f741bf7b9e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -152,14 +152,16 @@ public void execute() throws Exception { error("Volume path %s is not a directory or doesn't exist", volume); return; } - File hddsRootDir 
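// The change above in miniature: resolving a child against a parent File
// replaces hand-built "/"-concatenation, which is what the patch subject calls
// out. The paths below are made up for illustration.
import java.io.File;

final class PathJoinSketch {
  private PathJoinSketch() { }

  public static void main(String[] args) {
    File volumeDir = new File("/data/disk1");
    File concatenated = new File(volumeDir + "/" + "hdds" + "/" + "VERSION"); // brittle
    File resolved = new File(new File(volumeDir, "hdds"), "VERSION");         // preferred
    // equal abstract paths here, but the resolved form needs no separator
    // handling and reads as parent/child
    System.out.println(concatenated.equals(resolved));
  }
}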
= new File(volume + "/" + HddsVolume.HDDS_VOLUME_DIR); - File versionFile = new File(volume + "/" + HddsVolume.HDDS_VOLUME_DIR + - "/" + Storage.STORAGE_FILE_VERSION); - if (!hddsRootDir.exists() || !hddsRootDir.isDirectory() || - !versionFile.exists() || !versionFile.isFile()) { + File hddsRootDir = new File(volumeDir, HddsVolume.HDDS_VOLUME_DIR); + if (!hddsRootDir.exists() || !hddsRootDir.isDirectory()) { error("Volume path %s is not a valid data volume", volume); return; } + File versionFile = new File(hddsRootDir, Storage.STORAGE_FILE_VERSION); + if (!versionFile.exists() || !versionFile.isFile()) { + error("Version file %s does not exist", versionFile); + return; + } configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume); } From 0348abeea34d9429908ead74b443bd6b2352f9bd Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Tue, 11 Mar 2025 09:10:40 +0100 Subject: [PATCH 23/29] print errors --- .../org/apache/hadoop/hdds/cli/AbstractSubcommand.java | 5 +++++ .../java/org/apache/hadoop/hdds/cli/GenericCli.java | 3 ++- .../apache/hadoop/hdds/cli/GenericParentCommand.java | 2 ++ .../org/apache/hadoop/ozone/repair/RepairTool.java | 5 +++++ .../datanode/schemaupgrade/UpgradeContainerSchema.java | 10 +++++----- 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java index 9aeab3c7e88d..b7f7170c2ae4 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java @@ -76,6 +76,11 @@ public boolean isVerbose() { public OzoneConfiguration getOzoneConf() { return conf; } + + @Override + public void printError(Throwable t) { + t.printStackTrace(); + } } protected PrintWriter out() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index af8413cc43a4..d5b59da0bce2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -87,7 +87,8 @@ public int execute(String[] argv) { return cmd.execute(argv); } - protected void printError(Throwable error) { + @Override + public void printError(Throwable error) { //message could be null in case of NPE. This is unexpected so we can //print out the stack trace. if (verbose || Strings.isNullOrEmpty(error.getMessage())) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java index ef0c94c0033f..68cf45e17860 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java @@ -28,4 +28,6 @@ public interface GenericParentCommand { /** Returns a cached configuration, i.e. it is created only once, subsequent calls return the same instance. 
*/ OzoneConfiguration getOzoneConf(); + + void printError(Throwable t); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index e3255a4a4261..871a901c5da9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -108,6 +108,11 @@ protected void error(String msg, Object... args) { err().println(formatMessage(msg, args)); } + protected void error(Throwable t, String msg, Object... args) { + error(msg, args); + rootCommand().printError(t); + } + private String formatMessage(String msg, Object[] args) { if (args != null && args.length > 0) { msg = String.format(msg, args); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 15f741bf7b9e..fdf8d5632a86 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -106,8 +106,8 @@ List run(OzoneConfiguration configuration, List info("Finish upgrading containers on volume %s, %s", hddsVolume.getVolumeRootDir(), result); } catch (Exception e) { - error("Failed to upgrade containers on volume %s", - hddsVolume.getVolumeRootDir(), e); + error(e, "Failed to upgrade containers on volume %s", + hddsVolume.getVolumeRootDir()); } } @@ -309,7 +309,7 @@ public CompletableFuture getUpgradeFuture() { try { UpgradeUtils.createFile(file); } catch (IOException ioe) { - error("Failed to create upgrade complete file %s.", file, ioe); + error(ioe, "Failed to create upgrade complete file %s.", file); } } if (lockFile.exists()) { @@ -373,11 +373,11 @@ private ContainerData parseContainerData(File containerDir) { return null; } } catch (IOException ex) { - error("Failed to parse ContainerFile: %s.", containerFile, ex); + error(ex, "Failed to parse ContainerFile: %s.", containerFile); return null; } } catch (Throwable e) { - error("Failed to load container: %s.", containerDir, e); + error(e, "Failed to load container: %s.", containerDir); return null; } } From f6f57be7f5b6e62d2a69b2c0ba2d666339cd8e46 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 13:24:42 +0100 Subject: [PATCH 24/29] implement dry-run --- .../schemaupgrade/UpgradeContainerSchema.java | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index fdf8d5632a86..99b698e0ec31 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -270,7 +270,7 @@ public CompletableFuture getUpgradeFuture() { // load DB store try { - hddsVolume.loadDbStore(false); + hddsVolume.loadDbStore(isDryRun()); RawDB db = DatanodeStoreCache.getInstance().getDB( volumeDBPath.getAbsolutePath(), config); dataStore = 
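// Sketch of the dry-run convention this hunk introduces: reads and validation
// run unchanged, while every mutating step checks isDryRun() first, so a dry
// run exercises the same code path without writing. The class below is a
// stand-in; only the flag-checking pattern mirrors the patch.
import java.util.HashMap;
import java.util.Map;

final class DryRunTransfer {
  private final boolean dryRun;

  DryRunTransfer(boolean dryRun) {
    this.dryRun = dryRun;
  }

  boolean isDryRun() {
    return dryRun;
  }

  /** Counts the record either way; writes to the target only for real runs. */
  long transfer(Map<String, byte[]> target, String key, byte[] value) {
    if (!isDryRun()) {
      target.put(key, value);
    }
    return 1L;
  }

  public static void main(String[] unused) {
    Map<String, byte[]> target = new HashMap<>();
    long counted = new DryRunTransfer(true).transfer(target, "k", new byte[0]);
    System.out.println(counted + " row(s) counted, " + target.size() + " written");
  }
}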
(DatanodeStoreSchemaThreeImpl) db.getStore(); @@ -425,8 +425,10 @@ private long transferTableData(Table targetTable, String key = DatanodeSchemaThreeDBDefinition .getContainerKeyPrefix(containerData.getContainerID()) + StringUtils.bytes2String(next.getKey()); - targetTable - .put(FixedLengthStringCodec.string2Bytes(key), next.getValue()); + if (!isDryRun()) { + targetTable + .put(FixedLengthStringCodec.string2Bytes(key), next.getValue()); + } } } return count; @@ -453,13 +455,17 @@ private void rewriteAndBackupContainerDataFile(ContainerData containerData, keyValueContainerData.getContainerID() + BACKUP_CONTAINER_DATA_FILE_SUFFIX); - // backup v2 container data file - NativeIO.renameTo(originContainerFile, bakFile); - result.setBackupContainerFilePath(bakFile.getAbsolutePath()); + if (isDryRun()) { + FileUtils.copyFile(originContainerFile, bakFile); + } else { + // backup v2 container data file + NativeIO.renameTo(originContainerFile, bakFile); - // gen new v3 container data file - ContainerDataYaml.createContainerFile(copyContainerData, originContainerFile); + // gen new v3 container data file + ContainerDataYaml.createContainerFile(copyContainerData, originContainerFile); + } + result.setBackupContainerFilePath(bakFile.getAbsolutePath()); result.setNewContainerData(copyContainerData); result.setNewContainerFilePath(originContainerFile.getAbsolutePath()); } From b6d08abae24e78aef36160d108d1e465910f8da8 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 31 Mar 2025 11:59:00 +0200 Subject: [PATCH 25/29] add fatal() --- .../org/apache/hadoop/ozone/repair/RepairTool.java | 10 ++++++++++ .../datanode/schemaupgrade/UpgradeContainerSchema.java | 8 ++++---- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index 871a901c5da9..5d582787b468 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -100,19 +100,29 @@ protected boolean isDryRun() { return dryRun; } + /** Print to stdout the formatted from {@code msg} and {@code args}. */ protected void info(String msg, Object... args) { out().println(formatMessage(msg, args)); } + /** Print to stderr the formatted from {@code msg} and {@code args}. */ protected void error(String msg, Object... args) { err().println(formatMessage(msg, args)); } + /** Print to stderr the message formatted from {@code msg} and {@code args}, + * and also print the exception {@code t}. */ protected void error(Throwable t, String msg, Object... args) { error(msg, args); rootCommand().printError(t); } + /** Fail with {@link IllegalStateException} using the message formatted from {@code msg} and {@code args}. */ + protected void fatal(String msg, Object... 
args) { + String formatted = formatMessage(msg, args); + throw new IllegalStateException(formatted); + } + private String formatMessage(String msg, Object[] args) { if (args != null && args.length > 0) { msg = String.format(msg, args); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 99b698e0ec31..6e9fe3ccad02 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -137,7 +137,7 @@ public void execute() throws Exception { if (metadataLayoutFeature.layoutVersion() < needLayoutVersion || softwareLayoutFeature.layoutVersion() < needLayoutVersion) { - error( + fatal( "Please upgrade your software version, no less than %s," + " current metadata layout version is %s," + " software layout version is %s", @@ -149,17 +149,17 @@ public void execute() throws Exception { if (!Strings.isNullOrEmpty(volume)) { File volumeDir = new File(volume); if (!volumeDir.exists() || !volumeDir.isDirectory()) { - error("Volume path %s is not a directory or doesn't exist", volume); + fatal("Volume path %s is not a directory or doesn't exist", volume); return; } File hddsRootDir = new File(volumeDir, HddsVolume.HDDS_VOLUME_DIR); if (!hddsRootDir.exists() || !hddsRootDir.isDirectory()) { - error("Volume path %s is not a valid data volume", volume); + fatal("Volume path %s is not a valid data volume", volume); return; } File versionFile = new File(hddsRootDir, Storage.STORAGE_FILE_VERSION); if (!versionFile.exists() || !versionFile.isFile()) { - error("Version file %s does not exist", versionFile); + fatal("Version file %s does not exist", versionFile); return; } configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volume); From d34517c2c94ed5720dbd5c22276e36260bf2c9ea Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 24 Mar 2025 14:44:51 +0100 Subject: [PATCH 26/29] fix and improve unit test --- .../schemaupgrade/ContainerUpgradeResult.java | 12 ++ .../schemaupgrade/UpgradeContainerSchema.java | 8 +- .../TestUpgradeContainerSchema.java | 174 +++++++++++++----- 3 files changed, 146 insertions(+), 48 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java index 10b10278a09e..693b564e8f9e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/ContainerUpgradeResult.java @@ -51,6 +51,10 @@ public void setNewContainerData( this.newContainerData = newContainerData; } + ContainerData getNewContainerData() { + return newContainerData; + } + public long getCostMs() { return endTimeMs - startTimeMs; } @@ -63,10 +67,18 @@ public void setBackupContainerFilePath(String backupContainerFilePath) { this.backupContainerFilePath = backupContainerFilePath; } + String getBackupContainerFilePath() { + return backupContainerFilePath; + } + public void setNewContainerFilePath(String newContainerFilePath) { this.newContainerFilePath = newContainerFilePath; } + String 
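// The fail-fast helper just added, reduced to its contract: error() prints and
// lets the command continue, fatal() aborts by throwing, so precondition
// failures surface as a non-zero exit code. The throwing logic mirrors the
// patch; the catch block exists only to demonstrate the behavior.
final class FatalContract {
  private FatalContract() { }

  static void fatal(String msg, Object... args) {
    throw new IllegalStateException(
        args != null && args.length > 0 ? String.format(msg, args) : msg);
  }

  public static void main(String[] unused) {
    try {
      fatal("Volume path %s is not a directory or doesn't exist", "/data/disk1");
    } catch (IllegalStateException e) {
      System.err.println(e.getMessage()); // a CLI driver maps this to exit code != 0
    }
  }
}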
getNewContainerFilePath() { + return newContainerFilePath; + } + public void success(long rowCount) { this.totalRow = rowCount; this.endTimeMs = System.currentTimeMillis(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java index 6e9fe3ccad02..a0a7a4de88a6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/UpgradeContainerSchema.java @@ -82,6 +82,8 @@ public class UpgradeContainerSchema extends RepairTool { description = "volume path") private String volume; + private List lastResults; + List run(OzoneConfiguration configuration, List volumes) { List results = new ArrayList<>(); Map> volumeFutures = new HashMap<>(); @@ -184,7 +186,11 @@ public void execute() throws Exception { } // do upgrade - run(configuration, allVolume); + lastResults = run(configuration, allVolume); + } + + List getLastResults() { + return lastResults; } /** diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java index 60c4332ec25e..38dc8c06cc51 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/datanode/schemaupgrade/TestUpgradeContainerSchema.java @@ -17,41 +17,60 @@ package org.apache.hadoop.ozone.repair.datanode.schemaupgrade; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; +import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; +import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import com.google.common.collect.Lists; import java.io.File; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.UUID; import 
java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; +import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.container.ContainerTestHelper; +import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -72,19 +91,22 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import picocli.CommandLine; /** * Tests for {@link UpgradeContainerSchema} class. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestUpgradeContainerSchema { private static final String SCM_ID = UUID.randomUUID().toString(); - private static final OzoneConfiguration CONF = new OzoneConfiguration(); + private OzoneConfiguration conf; - @TempDir - private File testRoot; private MutableVolumeSet volumeSet; - private UUID datanodeId; + private DatanodeDetails datanodeDetails; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; private BlockManager blockManager; @@ -92,41 +114,50 @@ class TestUpgradeContainerSchema { private ContainerSet containerSet; private List volumes; + @BeforeAll + void init() { + CodecBuffer.enableLeakDetection(); + } + @BeforeEach - public void setup() throws Exception { - DatanodeConfiguration dc = CONF.getObject(DatanodeConfiguration.class); + void setup(@TempDir Path testRoot) throws Exception { + conf = new OzoneConfiguration(); + + DatanodeConfiguration dc = conf.getObject(DatanodeConfiguration.class); dc.setContainerSchemaV3Enabled(true); - CONF.setFromObject(dc); + conf.setFromObject(dc); + + final Path volume1Path = Files.createDirectories(testRoot.resolve("volume1").toAbsolutePath()); + final Path volume2Path = Files.createDirectories(testRoot.resolve("volume2").toAbsolutePath()); + final Path metadataPath = Files.createDirectories(testRoot.resolve("metadata").toAbsolutePath()); - final File volume1Path = new File(testRoot, "volume1"); - final File volume2Path = new File(testRoot, "volume2"); + conf.set(HDDS_DATANODE_DIR_KEY, volume1Path + "," + volume2Path); + conf.set(OZONE_METADATA_DIRS, metadataPath.toString()); + + datanodeDetails = MockDatanodeDetails.randomDatanodeDetails(); + } - assertTrue(volume1Path.mkdirs()); - assertTrue(volume2Path.mkdirs()); + private void initDatanode(HDDSLayoutFeature layoutFeature) throws IOException { + DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(conf, + datanodeDetails.getUuidString(), + layoutFeature.layoutVersion()); + layoutStorage.initialize(); - final File metadataPath = new File(testRoot, "metadata"); - assertTrue(metadataPath.mkdirs()); + String idFilePath = Objects.requireNonNull(HddsServerUtil.getDatanodeIdFilePath(conf), "datanode.id path"); + ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, new File(idFilePath), conf); - CONF.set(HDDS_DATANODE_DIR_KEY, - volume1Path.getAbsolutePath() + "," + volume2Path.getAbsolutePath()); - CONF.set(OZONE_METADATA_DIRS, metadataPath.getAbsolutePath()); - datanodeId = UUID.randomUUID(); - volumeSet = new MutableVolumeSet(datanodeId.toString(), SCM_ID, CONF, + volumeSet = new MutableVolumeSet(datanodeDetails.getUuidString(), SCM_ID, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); // create rocksdb instance in volume dir volumes = new ArrayList<>(); for (StorageVolume storageVolume : volumeSet.getVolumesList()) { HddsVolume hddsVolume = (HddsVolume) storageVolume; - StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, CONF, null, + StorageVolumeUtil.checkVolume(hddsVolume, SCM_ID, SCM_ID, conf, null, null); volumes.add(hddsVolume); } - DatanodeDetails datanodeDetails = mock(DatanodeDetails.class); - when(datanodeDetails.getUuidString()).thenReturn(datanodeId.toString()); - when(datanodeDetails.getUuid()).thenReturn(datanodeId); - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); final AtomicInteger loopCount = new AtomicInteger(0); when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) @@ -137,38 +168,84 @@ public void setup() throws Exception { containerSet = newContainerSet(); - 
blockManager = new BlockManagerImpl(CONF); + blockManager = new BlockManagerImpl(conf); chunkManager = new FilePerBlockStrategy(true, blockManager); } - @BeforeAll - public static void beforeClass() { - CodecBuffer.enableLeakDetection(); - } - @AfterEach - public void after() throws Exception { + void after() throws Exception { CodecTestUtil.gc(); } @Test - public void testUpgrade() throws IOException { - int num = 2; + void failsBeforeOzoneUpgrade() throws IOException { + initDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V2); + genSchemaV2Containers(1); + shutdownAllVolume(); + List results = runCommand(false, GenericCli.EXECUTION_ERROR_EXIT_CODE); + assertNull(results); + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void testUpgrade(boolean dryRun) throws IOException { + initDatanode(HDDSLayoutFeature.DATANODE_SCHEMA_V3); final Map> - keyValueContainerBlockDataMap = genSchemaV2Containers(num); - assertEquals(num, keyValueContainerBlockDataMap.size()); + keyValueContainerBlockDataMap = genSchemaV2Containers(2); shutdownAllVolume(); - UpgradeContainerSchema subject = (UpgradeContainerSchema) new OzoneRepair().getCmd() + List results = runCommand(dryRun, 0); + assertNotNull(results); + assertEquals(2, results.size()); + for (VolumeUpgradeResult result : results) { + assertTrue(result.isSuccess()); + for (ContainerUpgradeResult cr : result.getResultMap().values()) { + assertSame(ContainerUpgradeResult.Status.SUCCESS, cr.getStatus()); + KeyValueContainerData pre = assertInstanceOf(KeyValueContainerData.class, cr.getOriginContainerData()); + KeyValueContainerData post = assertInstanceOf(KeyValueContainerData.class, cr.getNewContainerData()); + assertEquals(SCHEMA_V2, pre.getSchemaVersion()); + assertEquals(SCHEMA_V3, post.getSchemaVersion()); + assertEquals(pre.getState(), post.getState()); + String schemaVersionKey = "schemaVersion\\s*:\\W*"; + assertThat(new File(cr.getBackupContainerFilePath())) + .exists() + .content(UTF_8) + .containsPattern(schemaVersionKey + SCHEMA_V2); + assertThat(new File(cr.getNewContainerFilePath())) + .exists() + .content(UTF_8) + .containsPattern(schemaVersionKey + (dryRun ? 
SCHEMA_V2 : SCHEMA_V3)); + } + } + + if (!dryRun) { + checkV3MetaData(keyValueContainerBlockDataMap, results); + } + } + + private List runCommand(boolean dryRun, int expectedExitCode) { + CommandLine cmd = new OzoneRepair().getCmd(); + + List argList = Stream.of(HDDS_DATANODE_DIR_KEY, OZONE_METADATA_DIRS) + .flatMap(key -> Stream.of("-D", key + "=" + conf.get(key))) + .collect(Collectors.toList()); + argList.addAll(Arrays.asList("datanode", "upgrade-container-schema")); + if (dryRun) { + argList.add("--dry-run"); + } + + int exitCode = withTextFromSystemIn("y") + .execute(() -> cmd.execute(argList.toArray(new String[0]))); + assertEquals(expectedExitCode, exitCode); + + UpgradeContainerSchema subject = cmd .getSubcommands().get("datanode") .getSubcommands().get("upgrade-container-schema") - .getCommandSpec().userObject(); + .getCommand(); - final List results = subject.run(CONF, volumes); - - checkV3MetaData(keyValueContainerBlockDataMap, results); + return subject.getLastResults(); } private Map putAnyBlockData(KeyValueContainerData data, @@ -183,7 +260,7 @@ private Map putAnyBlockData(KeyValueContainerData data, BlockID blockID = ContainerTestHelper.getTestBlockID(data.getContainerID()); BlockData kd = new BlockData(blockID); - List chunks = Lists.newArrayList(); + List chunks = new ArrayList<>(); putChunksInBlock(1, i, chunks, container, blockID); kd.setChunks(chunks); @@ -230,7 +307,7 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, private Map> genSchemaV2Containers(int numContainers) throws IOException { - CONF.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false); + conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, false); // container id ==> blocks final Map> checkBlockDataMap = @@ -243,16 +320,16 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, KeyValueContainerData data = new KeyValueContainerData(containerId, ContainerLayoutVersion.FILE_PER_BLOCK, ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), - datanodeId.toString()); - data.setSchemaVersion(OzoneConsts.SCHEMA_V2); + datanodeDetails.getUuidString()); + data.setSchemaVersion(SCHEMA_V2); - KeyValueContainer container = new KeyValueContainer(data, CONF); + KeyValueContainer container = new KeyValueContainer(data, conf); container.create(volumeSet, volumeChoosingPolicy, SCM_ID); containerSet.addContainer(container); data = (KeyValueContainerData) containerSet.getContainer(containerId) .getContainerData(); - data.setSchemaVersion(OzoneConsts.SCHEMA_V2); + data.setSchemaVersion(SCHEMA_V2); final Map blockDataMap = putAnyBlockData(data, container, 10); @@ -262,6 +339,9 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, checkBlockDataMap.put(data, blockDataMap); } + + assertEquals(numContainers, checkBlockDataMap.size()); + return checkBlockDataMap; } From ac4a71e80f3cc452824e010c3dd02ebaa31ca226 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 31 Mar 2025 12:13:27 +0200 Subject: [PATCH 27/29] fixup for 8e77ce2e87 do not require DN to be IN_MAINTENANCE --- .../ozone/shell/TestOzoneContainerUpgradeShell.java | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index 58e78bbbd58c..0be07248a726 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -26,9 +26,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; -import static org.apache.ozone.test.GenericTestUtils.captureErr; import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; -import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.common.base.Preconditions; @@ -64,7 +62,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.repair.OzoneRepair; -import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -136,13 +133,6 @@ public void testNormalContainerUpgrade() throws Exception { // datanode1 test check all pass & upgrade success int exitCode = runUpgrade(datanodeConf); assertEquals(0, exitCode); - - GenericTestUtils.PrintStreamCapturer err = captureErr(); - - // datanode2 NodeOperationalState is IN_SERVICE upgrade fail. - int exit2Code = runUpgrade(datanodeConfigs.get(1)); - assertEquals(0, exit2Code); - assertThat(err.get()).contains("IN_MAINTENANCE"); } private static int runUpgrade(OzoneConfiguration conf) { From 6f175459b445479d8ad4815043398bbf54713bfc Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Wed, 9 Apr 2025 18:17:00 +0200 Subject: [PATCH 28/29] keep original command for referring users to new one --- .../scm/cli/container/ContainerCommands.java | 1 + .../scm/cli/container/UpgradeSubcommand.java | 47 +++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 42304896febf..72f4e64aff08 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -36,6 +36,7 @@ CreateSubcommand.class, CloseSubcommand.class, ReportSubcommand.class, + UpgradeSubcommand.class, }) @MetaInfServices(AdminSubcommand.class) public class ContainerCommands implements AdminSubcommand { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java new file mode 100644 index 000000000000..cc28150146b0 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.cli.container;
+
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.cli.AbstractSubcommand;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import picocli.CommandLine;
+
+/**
+ * @deprecated by {@code ozone repair datanode upgrade-container-schema}
+ */
+@CommandLine.Command(
+    name = "upgrade",
+    description = "Please see `ozone repair datanode upgrade-container-schema`.",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class)
+@Deprecated
+public class UpgradeSubcommand extends AbstractSubcommand implements Callable<Void> {
+
+  @CommandLine.Option(names = {"--volume"}, description = "ignored")
+  private String volume;
+
+  @CommandLine.Option(names = {"-y", "--yes"}, description = "ignored")
+  private boolean yes;
+
+  @Override
+  public Void call() throws Exception {
+    throw new IllegalStateException(
+        "This command has moved; please use `ozone repair datanode upgrade-container-schema` instead.");
+  }
+}

From 220cc9f28bee3f90a64fb0e6ba43dca9fc1b21c0 Mon Sep 17 00:00:00 2001
From: Nandakumar Vadivelu
Date: Tue, 15 Apr 2025 13:20:44 +0530
Subject: [PATCH 29/29] Fixed incorrect master merge.

---
 .../cli/container/upgrade/UpgradeManager.java | 173 -------
 .../cli/container/upgrade/UpgradeTask.java    | 475 ------------------
 2 files changed, 648 deletions(-)
 delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
 delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java

diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
deleted file mode 100644
index 6b2f7818f8c5..000000000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeManager.java
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
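The deprecated stub above fails fast and redirects users. On the new side, per the diffstat at the top of this series, `upgrade-container-schema` now hangs off `ozone repair datanode`. A minimal picocli sketch of that nesting follows; the class names and the hard-coded subcommand tree are illustrative only, since Ozone actually wires subcommands together via `@MetaInfServices` discovery rather than a literal `subcommands = {...}` attribute:

    import java.util.concurrent.Callable;
    import picocli.CommandLine;
    import picocli.CommandLine.Command;
    import picocli.CommandLine.Option;

    // Illustrative only: Ozone discovers these via @MetaInfServices,
    // not a hard-coded subcommand tree like this one.
    @Command(name = "repair", subcommands = RepairTreeSketch.Datanode.class)
    public class RepairTreeSketch implements Runnable {

      @Command(name = "datanode", subcommands = Datanode.UpgradeContainerSchema.class)
      static class Datanode implements Runnable {
        @Override
        public void run() {
          // Parent command: nothing to do on its own.
        }

        @Command(name = "upgrade-container-schema")
        static class UpgradeContainerSchema implements Callable<Integer> {
          @Option(names = "--dry-run",
              description = "Report what would be upgraded without changing anything.")
          private boolean dryRun;

          @Override
          public Integer call() {
            System.out.println(dryRun ? "dry run only" : "upgrading schema v2 -> v3");
            return 0;
          }
        }
      }

      @Override
      public void run() {
      }

      public static void main(String[] args) {
        // Equivalent of: ozone repair datanode upgrade-container-schema --dry-run
        int exitCode = new CommandLine(new RepairTreeSketch())
            .execute("datanode", "upgrade-container-schema", "--dry-run");
        System.exit(exitCode);
      }
    }

Keeping the old `upgrade` name alive as a stub, as patch 28 does, means existing scripts fail with a pointer to the new location instead of an unknown-command error.
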
-package org.apache.hadoop.hdds.scm.cli.container.upgrade;
-
-import com.google.common.annotations.VisibleForTesting;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class manages v2 to v3 container upgrade.
- */
-public class UpgradeManager {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(UpgradeManager.class);
-
-  private final Map<String, DatanodeStoreSchemaThreeImpl>
-      volumeStoreMap = new ConcurrentHashMap<>();
-
-  public List<Result> run(OzoneConfiguration configuration,
-      List<StorageVolume> volumes) throws IOException {
-    List<Result> results = new ArrayList<>();
-    Map<HddsVolume, CompletableFuture<Result>> volumeFutures = new HashMap<>();
-    long startTime = Time.monotonicNow();
-
-    LOG.info("Start to upgrade {} volume(s)", volumes.size());
-    for (StorageVolume volume : volumes) {
-      final HddsVolume hddsVolume = (HddsVolume) volume;
-      final UpgradeTask task =
-          new UpgradeTask(configuration, hddsVolume, volumeStoreMap);
-      final CompletableFuture<Result> future = task.getUpgradeFuture();
-      volumeFutures.put(hddsVolume, future);
-    }
-
-    for (Map.Entry<HddsVolume, CompletableFuture<Result>> entry :
-        volumeFutures.entrySet()) {
-      final HddsVolume hddsVolume = entry.getKey();
-      final CompletableFuture<Result> volumeFuture = entry.getValue();
-
-      try {
-        final Result result = volumeFuture.get();
-        results.add(result);
-        LOG.info("Finish upgrading containers on volume {}, {}",
-            hddsVolume.getVolumeRootDir(), result.toString());
-      } catch (Exception e) {
-        LOG.error("Failed to upgrade containers on volume {}",
-            hddsVolume.getVolumeRootDir(), e);
-      }
-    }
-
-    LOG.info("It took {}ms to finish all volume upgrade.",
-        (Time.monotonicNow() - startTime));
-    return results;
-  }
-
-  @VisibleForTesting
-  public DatanodeStoreSchemaThreeImpl getDBStore(HddsVolume volume) {
-    return volumeStoreMap.get(volume.getStorageDir().getAbsolutePath());
-  }
-
-  /**
-   * This class contains v2 to v3 container upgrade result.
-   */
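The run() method above fans out one CompletableFuture per volume, then joins them one by one so that a failure on a single volume is logged without aborting the others. A JDK-only sketch of that fan-out/fan-in shape, where the `Outcome` record is a stand-in for `UpgradeManager.Result` and the volume paths are made up:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.CompletableFuture;

    public class FanOutSketch {

      /** Stand-in for UpgradeManager.Result; just a volume name and a flag. */
      record Outcome(String volume, boolean success) { }

      static Outcome upgradeVolume(String volume) {
        // The real task migrates every container on the volume here.
        return new Outcome(volume, true);
      }

      public static void main(String[] args) {
        List<String> volumes = List.of("/data/disk1", "/data/disk2");

        // Fan out: one async task per volume, like one UpgradeTask per HddsVolume.
        Map<String, CompletableFuture<Outcome>> futures = new LinkedHashMap<>();
        for (String v : volumes) {
          futures.put(v, CompletableFuture.supplyAsync(() -> upgradeVolume(v)));
        }

        // Fan in: join sequentially so one bad volume does not abort the rest.
        List<Outcome> results = new ArrayList<>();
        futures.forEach((v, f) -> {
          try {
            results.add(f.join());
          } catch (RuntimeException e) {
            System.err.println("Failed to upgrade volume " + v + ": " + e);
          }
        });
        results.forEach(System.out::println);
      }
    }
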
-  public static class Result {
-    private Map<Long, UpgradeTask.UpgradeContainerResult> resultMap;
-    private final HddsVolume hddsVolume;
-    private final long startTimeMs = Time.monotonicNow();
-    private long endTimeMs = 0L;
-    private Exception e = null;
-    private Status status = Status.FAIL;
-
-    public Result(HddsVolume hddsVolume) {
-      this.hddsVolume = hddsVolume;
-    }
-
-    public HddsVolume getHddsVolume() {
-      return hddsVolume;
-    }
-
-    public long getCost() {
-      return endTimeMs - startTimeMs;
-    }
-
-    public void setResultList(
-        List<UpgradeTask.UpgradeContainerResult> resultList) {
-      resultMap = new HashMap<>();
-      resultList.forEach(res -> resultMap
-          .put(res.getOriginContainerData().getContainerID(), res));
-    }
-
-    public Map<Long, UpgradeTask.UpgradeContainerResult> getResultMap() {
-      return resultMap;
-    }
-
-    public boolean isSuccess() {
-      return this.status == Status.SUCCESS;
-    }
-
-    public void success() {
-      this.endTimeMs = Time.monotonicNow();
-      this.status = Status.SUCCESS;
-    }
-
-    public void fail(Exception exception) {
-      this.endTimeMs = Time.monotonicNow();
-      this.status = Status.FAIL;
-      this.e = exception;
-    }
-
-    @Override
-    public String toString() {
-      final StringBuilder stringBuilder = new StringBuilder();
-      stringBuilder.append("Result:{");
-      stringBuilder.append("hddsRootDir=");
-      stringBuilder.append(getHddsVolume().getHddsRootDir());
-      stringBuilder.append(", resultList=");
-      AtomicLong total = new AtomicLong(0L);
-      if (resultMap != null) {
-        resultMap.forEach((k, r) -> {
-          stringBuilder.append(r.toString());
-          stringBuilder.append("\n");
-          total.addAndGet(r.getTotalRow());
-        });
-      }
-      stringBuilder.append(", totalRow=");
-      stringBuilder.append(total.get());
-      stringBuilder.append(", costMs=");
-      stringBuilder.append(getCost());
-      stringBuilder.append(", status=");
-      stringBuilder.append(status);
-      if (e != null) {
-        stringBuilder.append(", Exception=");
-        stringBuilder.append(e);
-      }
-      stringBuilder.append('}');
-      return stringBuilder.toString();
-    }
-
-    enum Status {
-      SUCCESS,
-      FAIL
-    }
-  }
-
-}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
deleted file mode 100644
index 262abc57fd2d..000000000000
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/UpgradeTask.java
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.hadoop.hdds.scm.cli.container.upgrade; - -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME; - -import com.google.common.base.Preconditions; -import java.io.File; -import java.io.IOException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; -import org.apache.hadoop.ozone.container.common.utils.RawDB; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; -import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaTwoDBDefinition; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; -import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class implements the v2 to v3 container upgrade process. 
- */
-public class UpgradeTask {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(UpgradeTask.class);
-
-  private final ConfigurationSource config;
-  private final HddsVolume hddsVolume;
-  private DatanodeStoreSchemaThreeImpl dataStore;
-  private final Map<String, DatanodeStoreSchemaThreeImpl> volumeStoreMap;
-
-  private static final String BACKUP_CONTAINER_DATA_FILE_SUFFIX = ".backup";
-  public static final String UPGRADE_COMPLETE_FILE_NAME = "upgrade.complete";
-  public static final String UPGRADE_LOCK_FILE_NAME = "upgrade.lock";
-
-  private static final Set<String> COLUMN_FAMILIES_NAME =
-      (new DatanodeSchemaTwoDBDefinition("", new OzoneConfiguration()))
-          .getMap().keySet();
-
-  public UpgradeTask(ConfigurationSource config, HddsVolume hddsVolume,
-      Map<String, DatanodeStoreSchemaThreeImpl> storeMap) {
-    this.config = config;
-    this.hddsVolume = hddsVolume;
-    this.volumeStoreMap = storeMap;
-  }
-
-  public CompletableFuture<UpgradeManager.Result> getUpgradeFuture() {
-    final File lockFile = UpgradeUtils.getVolumeUpgradeLockFile(hddsVolume);
-
-    return CompletableFuture.supplyAsync(() -> {
-
-      final UpgradeManager.Result result =
-          new UpgradeManager.Result(hddsVolume);
-
-      List<UpgradeContainerResult> resultList = new ArrayList<>();
-      final File hddsVolumeRootDir = hddsVolume.getHddsRootDir();
-
-      Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" +
-          "cannot be null");
-
-      // check CID directory and current file
-      File clusterIDDir = new File(hddsVolume.getStorageDir(),
-          hddsVolume.getClusterID());
-      if (!clusterIDDir.exists() || !clusterIDDir.isDirectory()) {
-        result.fail(new Exception("Volume " + hddsVolumeRootDir +
-            " is in an inconsistent state. Expected " +
-            "clusterID directory " + clusterIDDir +
-            " is not found or not a directory."));
-        return result;
-      }
-      File currentDir = new File(clusterIDDir, Storage.STORAGE_DIR_CURRENT);
-      if (!currentDir.exists() || !currentDir.isDirectory()) {
-        result.fail(new Exception(
-            "Current dir " + currentDir + " is not found or not a directory,"
-                + " skip upgrade."));
-        return result;
-      }
-
-      try {
-        // create lock file
-        if (!lockFile.createNewFile()) {
-          result.fail(new Exception("Upgrade lock file already exists " +
-              lockFile.getAbsolutePath() + ", skip upgrade."));
-          return result;
-        }
-      } catch (IOException e) {
-        result.fail(new Exception("Failed to create upgrade lock file " +
-            lockFile.getAbsolutePath() + ", skip upgrade."));
-        return result;
-      }
-
-      // check complete file again
-      final File completeFile =
-          UpgradeUtils.getVolumeUpgradeCompleteFile(hddsVolume);
-      if (completeFile.exists()) {
-        result.fail(new Exception("Upgrade complete file already exists " +
-            completeFile.getAbsolutePath() + ", skip upgrade."));
-        if (!lockFile.delete()) {
-          LOG.warn("Failed to delete upgrade lock file {}.", lockFile);
-        }
-        return result;
-      }
-
-      // backup DB directory
-      final File volumeDBPath;
-      try {
-        volumeDBPath = getVolumeDBPath(hddsVolume);
-        dbBackup(volumeDBPath);
-      } catch (IOException e) {
-        result.fail(new Exception(e.getMessage() + ", skip upgrade."));
-        return result;
-      }
-
-      // load DB store
-      try {
-        hddsVolume.loadDbStore(false);
-        RawDB db = DatanodeStoreCache.getInstance().getDB(
-            volumeDBPath.getAbsolutePath(), config);
-        dataStore = (DatanodeStoreSchemaThreeImpl) db.getStore();
-        volumeStoreMap.put(
-            hddsVolume.getStorageDir().getAbsolutePath(), dataStore);
-      } catch (IOException e) {
-        result.fail(new Exception(
-            "Failed to load db for volume " + hddsVolume.getVolumeRootDir() +
-                " for " + e.getMessage() + ", skip upgrade."));
-        return result;
-      }
-
-      LOG.info("Start to upgrade containers on volume {}",
-          hddsVolume.getVolumeRootDir());
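The method above brackets each volume upgrade with two markers: an `upgrade.lock` file created atomically up front, and an `upgrade.complete` file written once everything succeeds, so repeated or concurrent runs skip cleanly instead of corrupting a half-migrated volume. A condensed, self-contained sketch of that run-once protocol; the marker names match the constants above, everything else is illustrative:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    public class RunOnceSketch {

      /** Runs work at most once per directory, guarded by lock/complete markers. */
      static boolean runOnce(File dir, Runnable work) throws IOException {
        File lock = new File(dir, "upgrade.lock");
        File complete = new File(dir, "upgrade.complete");
        if (complete.exists()) {
          return false; // an earlier run already finished; nothing to do
        }
        // createNewFile is atomic: a concurrent second run loses the race and skips.
        if (!lock.createNewFile()) {
          return false;
        }
        try {
          work.run();
          if (!complete.createNewFile()) {
            System.err.println("Failed to create marker " + complete);
          }
          return true;
        } finally {
          if (!lock.delete()) {
            System.err.println("Failed to delete lock file " + lock);
          }
        }
      }

      public static void main(String[] args) throws IOException {
        File dir = Files.createTempDirectory("upgrade-sketch").toFile();
        System.out.println(runOnce(dir, () -> System.out.println("migrating...")));
        System.out.println(runOnce(dir, () -> System.out.println("migrating..."))); // false
      }
    }
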
-      File[] containerTopDirs = currentDir.listFiles();
-      if (containerTopDirs != null) {
-        for (File containerTopDir : containerTopDirs) {
-          try {
-            final List<UpgradeContainerResult> results =
-                upgradeSubContainerDir(containerTopDir);
-            resultList.addAll(results);
-          } catch (IOException e) {
-            result.fail(e);
-            return result;
-          }
-        }
-      }
-
-      result.setResultList(resultList);
-      result.success();
-      return result;
-    }).whenComplete((r, e) -> {
-      final File hddsRootDir = r.getHddsVolume().getHddsRootDir();
-      final File file =
-          UpgradeUtils.getVolumeUpgradeCompleteFile(r.getHddsVolume());
-      // create a flag file
-      if (e == null && r.isSuccess()) {
-        try {
-          UpgradeUtils.createFile(file);
-        } catch (IOException ioe) {
-          LOG.warn("Failed to create upgrade complete file {}.", file, ioe);
-        }
-      }
-      if (lockFile.exists()) {
-        boolean deleted = lockFile.delete();
-        if (!deleted) {
-          LOG.warn("Failed to delete upgrade lock file {}.", file);
-        }
-      }
-    });
-  }
-
-  private List<UpgradeContainerResult> upgradeSubContainerDir(
-      File containerTopDir) throws IOException {
-    List<UpgradeContainerResult> resultList = new ArrayList<>();
-    if (containerTopDir.isDirectory()) {
-      File[] containerDirs = containerTopDir.listFiles();
-      if (containerDirs != null) {
-        for (File containerDir : containerDirs) {
-          final ContainerData containerData = parseContainerData(containerDir);
-          if (containerData != null &&
-              ((KeyValueContainerData) containerData)
-                  .hasSchema(OzoneConsts.SCHEMA_V2)) {
-            final UpgradeContainerResult result =
-                new UpgradeContainerResult(containerData);
-            upgradeContainer(containerData, result);
-            resultList.add(result);
-          }
-        }
-      }
-    }
-    return resultList;
-  }
-
-  private ContainerData parseContainerData(File containerDir) {
-    try {
-      File containerFile = ContainerUtils.getContainerFile(containerDir);
-      long containerID = ContainerUtils.getContainerID(containerDir);
-      if (!containerFile.exists()) {
-        LOG.error("Missing .container file: {}.", containerDir);
-        return null;
-      }
-      try {
-        ContainerData containerData =
-            ContainerDataYaml.readContainerFile(containerFile);
-        if (containerID != containerData.getContainerID()) {
-          LOG.error("ContainerID in file {} mismatch with expected {}.",
-              containerFile, containerID);
-          return null;
-        }
-        if (containerData.getContainerType().equals(
-            ContainerProtos.ContainerType.KeyValueContainer) &&
-            containerData instanceof KeyValueContainerData) {
-          KeyValueContainerData kvContainerData =
-              (KeyValueContainerData) containerData;
-          containerData.setVolume(hddsVolume);
-          KeyValueContainerUtil.parseKVContainerData(kvContainerData, config);
-          return kvContainerData;
-        } else {
-          LOG.error("Container is not KeyValueContainer type: {}.",
-              containerDir);
-          return null;
-        }
-      } catch (IOException ex) {
-        LOG.error("Failed to parse ContainerFile: {}.", containerFile, ex);
-        return null;
-      }
-    } catch (Throwable e) {
-      LOG.error("Failed to load container: {}.", containerDir, e);
-      return null;
-    }
-  }
-
-  private void upgradeContainer(ContainerData containerData,
-      UpgradeContainerResult result) throws IOException {
-    final DBStore targetDBStore = dataStore.getStore();
-
-    // open container schema v2 rocksdb
-    final DatanodeStore dbStore = BlockUtils
-        .getUncachedDatanodeStore((KeyValueContainerData) containerData, config,
-            true);
-    final DBStore sourceDBStore = dbStore.getStore();
-
-    long total = 0L;
-    for (String tableName : COLUMN_FAMILIES_NAME) {
-      total += transferTableData(targetDBStore, sourceDBStore, tableName,
-          containerData);
-    }
-
-    rewriteAndBackupContainerDataFile(containerData, result);
-    result.success(total);
-  }
-
-  private long transferTableData(DBStore targetDBStore,
-      DBStore sourceDBStore, String tableName, ContainerData containerData)
-      throws IOException {
-    final Table<byte[], byte[]> deleteTransactionTable =
-        sourceDBStore.getTable(tableName);
-    final Table<byte[], byte[]> targetDeleteTransactionTable =
-        targetDBStore.getTable(tableName);
-    return transferTableData(targetDeleteTransactionTable,
-        deleteTransactionTable, containerData);
-  }
-
-  private long transferTableData(Table<byte[], byte[]> targetTable,
-      Table<byte[], byte[]> sourceTable, ContainerData containerData)
-      throws IOException {
-    long count = 0;
-    try (TableIterator<byte[], ? extends Table.KeyValue<byte[], byte[]>>
-        iter = sourceTable.iterator()) {
-      while (iter.hasNext()) {
-        count++;
-        Table.KeyValue<byte[], byte[]> next = iter.next();
-        String key = DatanodeSchemaThreeDBDefinition
-            .getContainerKeyPrefix(containerData.getContainerID())
-            + StringUtils.bytes2String(next.getKey());
-        targetTable
-            .put(FixedLengthStringCodec.string2Bytes(key), next.getValue());
-      }
-    }
-    return count;
-  }
-
-  private void rewriteAndBackupContainerDataFile(ContainerData containerData,
-      UpgradeContainerResult result) throws IOException {
-    if (containerData instanceof KeyValueContainerData) {
-      final KeyValueContainerData keyValueContainerData =
-          (KeyValueContainerData) containerData;
-
-      final KeyValueContainerData copyContainerData =
-          new KeyValueContainerData(keyValueContainerData);
-
-      copyContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V3);
-      copyContainerData.setState(keyValueContainerData.getState());
-      copyContainerData.setVolume(keyValueContainerData.getVolume());
-
-      final File originContainerFile = KeyValueContainer
-          .getContainerFile(keyValueContainerData.getMetadataPath(),
-              keyValueContainerData.getContainerID());
-
-      final File bakFile = new File(keyValueContainerData.getMetadataPath(),
-          keyValueContainerData.getContainerID() +
-              BACKUP_CONTAINER_DATA_FILE_SUFFIX);
-
-      // backup v2 container data file
-      NativeIO.renameTo(originContainerFile, bakFile);
-      result.setBackupContainerFilePath(bakFile.getAbsolutePath());
-
-      // gen new v3 container data file
-      ContainerDataYaml.createContainerFile(copyContainerData, originContainerFile);
-
-      result.setNewContainerData(copyContainerData);
-      result.setNewContainerFilePath(originContainerFile.getAbsolutePath());
-    }
-  }
-
-  public File getVolumeDBPath(HddsVolume volume) throws IOException {
-    File clusterIdDir = new File(volume.getStorageDir(), volume.getClusterID());
-    File storageIdDir = new File(clusterIdDir, volume.getStorageID());
-    File containerDBPath = new File(storageIdDir, CONTAINER_DB_NAME);
-    if (containerDBPath.exists() && containerDBPath.isDirectory()) {
-      return containerDBPath;
-    } else {
-      throw new IOException("DB " + containerDBPath +
-          " doesn't exist or is not a directory");
-    }
-  }
-
-  public void dbBackup(File dbPath) throws IOException {
-    final File backup = new File(dbPath.getParentFile(),
-        new SimpleDateFormat("yyyy-MM-dd'T'HH-mm-ss").format(new Date()) +
-            "-" + dbPath.getName() + ".backup");
-    if (backup.exists()) {
-      throw new IOException("Backup dir " + backup + "already exists");
-    } else {
-      FileUtils.copyDirectory(dbPath, backup, true);
-      System.out.println("DB " + dbPath + " is backup to " + backup);
-    }
-  }
-
-  /**
-   * This class represents upgrade v2 to v3 container result.
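transferTableData() above is the heart of the migration: each row of a container's private schema v2 table is copied into the shared per-volume schema v3 table, with a fixed-length container-ID prefix spliced onto the key. A plain-Java sketch of that rekeying loop over in-memory maps; the prefix(...) encoding here is an illustration only, the real prefix comes from DatanodeSchemaThreeDBDefinition.getContainerKeyPrefix():

    import java.util.Map;
    import java.util.TreeMap;

    public class RekeySketch {

      // Illustrative prefix; the real upgrade uses a fixed-length encoding
      // of the container ID, not this format string.
      static String prefix(long containerId) {
        return String.format("%016x|", containerId);
      }

      /** Copy all rows from a per-container table into the shared table. */
      static long transfer(long containerId, Map<String, byte[]> source,
          Map<String, byte[]> target) {
        long count = 0;
        for (Map.Entry<String, byte[]> kv : source.entrySet()) {
          // Same value, new key: container prefix + original key.
          target.put(prefix(containerId) + kv.getKey(), kv.getValue());
          count++;
        }
        return count;
      }

      public static void main(String[] args) {
        Map<String, byte[]> v2 = new TreeMap<>();
        v2.put("block-1", new byte[] {1});
        Map<String, byte[]> v3 = new TreeMap<>();
        System.out.println("rows moved: " + transfer(42, v2, v3));
        System.out.println(v3.keySet()); // [000000000000002a|block-1]
      }
    }

Because every key carries its container's prefix, many containers can share one RocksDB instance per volume, which is the point of schema v3. Note also that dbBackup() above copies the whole DB directory to a timestamped sibling before any row is touched, so a failed run can be rolled back by hand.
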
- */ - public static class UpgradeContainerResult { - private final ContainerData originContainerData; - private ContainerData newContainerData; - private long totalRow = 0L; - private final long startTimeMs = Time.monotonicNow(); - private long endTimeMs = 0L; - private Status status; - - private String backupContainerFilePath; - private String newContainerFilePath; - - public UpgradeContainerResult( - ContainerData originContainerData) { - this.originContainerData = originContainerData; - this.status = Status.FAIL; - } - - public long getTotalRow() { - return totalRow; - } - - public Status getStatus() { - return status; - } - - public void setNewContainerData( - ContainerData newContainerData) { - this.newContainerData = newContainerData; - } - - public long getCostMs() { - return endTimeMs - startTimeMs; - } - - public ContainerData getOriginContainerData() { - return originContainerData; - } - - public ContainerData getNewContainerData() { - return newContainerData; - } - - public void setBackupContainerFilePath(String backupContainerFilePath) { - this.backupContainerFilePath = backupContainerFilePath; - } - - public void setNewContainerFilePath(String newContainerFilePath) { - this.newContainerFilePath = newContainerFilePath; - } - - public void success(long rowCount) { - this.totalRow = rowCount; - this.endTimeMs = Time.monotonicNow(); - this.status = Status.SUCCESS; - } - - @Override - public String toString() { - final StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("Result:{"); - stringBuilder.append("containerID="); - stringBuilder.append(originContainerData.getContainerID()); - stringBuilder.append(", originContainerSchemaVersion="); - stringBuilder.append( - ((KeyValueContainerData) originContainerData).getSchemaVersion()); - - if (newContainerData != null) { - stringBuilder.append(", schemaV2ContainerFileBackupPath="); - stringBuilder.append(backupContainerFilePath); - - stringBuilder.append(", newContainerSchemaVersion="); - stringBuilder.append( - ((KeyValueContainerData) newContainerData).getSchemaVersion()); - - stringBuilder.append(", schemaV3ContainerFilePath="); - stringBuilder.append(newContainerFilePath); - } - stringBuilder.append(", totalRow="); - stringBuilder.append(totalRow); - stringBuilder.append(", costMs="); - stringBuilder.append(getCostMs()); - stringBuilder.append(", status="); - stringBuilder.append(status); - stringBuilder.append("}"); - return stringBuilder.toString(); - } - - enum Status { - SUCCESS, - FAIL - } - } -}
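
Tying the series together, the integration test in the first hunk of this section drives the relocated command end to end: it assembles `-D key=value` configuration overrides, appends `datanode upgrade-container-schema` (plus `--dry-run` where applicable), and feeds the confirmation prompt from stdin. A stand-alone sketch of that driver pattern; `ToolSketch` is a made-up command, not an Ozone class:

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import picocli.CommandLine;
    import picocli.CommandLine.Command;
    import picocli.CommandLine.Option;

    @Command(name = "tool")
    public class ToolSketch implements Runnable {

      // -D key=value pairs, like the config overrides built by the test.
      @Option(names = "-D")
      private Map<String, String> overrides;

      @Override
      public void run() {
        System.out.println("overrides = " + overrides);
        // A real command would prompt here; the caller below pre-feeds "y".
      }

      public static void main(String[] unused) {
        List<String> argList = new ArrayList<>();
        argList.addAll(Arrays.asList("-D", "ozone.metadata.dirs=/tmp/meta"));

        InputStream originalIn = System.in;
        try {
          // Pre-feed the confirmation, mirroring withTextFromSystemIn("y").
          System.setIn(new ByteArrayInputStream(
              "y\n".getBytes(StandardCharsets.UTF_8)));
          int exitCode = new CommandLine(new ToolSketch())
              .execute(argList.toArray(new String[0]));
          System.out.println("exit code = " + exitCode);
        } finally {
          System.setIn(originalIn);
        }
      }
    }

Swapping System.in rather than mocking the prompt keeps the test close to what an operator actually types when running `ozone repair datanode upgrade-container-schema`.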