diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 916bd7fdfb03..1dffca6d573a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo;
@@ -420,4 +421,7 @@ StatusAndMessages finalizeScmUpgrade(String upgradeClientID)
   StatusAndMessages queryUpgradeFinalizationProgress(
       String upgradeClientID, boolean force, boolean readonly)
       throws IOException;
+
+  DecommissionScmResponseProto decommissionScm(
+      String scmId) throws IOException;
 }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 7690b2eefb32..7cdb7bf7d576 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
@@ -349,6 +350,7 @@ List getFailedDeletedBlockTxn(int count,
   Map> getSafeModeRuleStatuses()
       throws IOException;
 
+
   /**
    * Force SCM out of Safe mode.
    *
@@ -441,6 +443,7 @@ StatusAndMessages finalizeScmUpgrade(String upgradeClientID)
   StatusAndMessages queryUpgradeFinalizationProgress(
       String upgradeClientID, boolean force, boolean readonly)
       throws IOException;
+
   /**
    * Obtain a token which can be used to let datanodes verify authentication of
    * commands operating on {@code containerID}.
@@ -455,4 +458,7 @@ long getContainerCount(HddsProtos.LifeCycleState state)
   List getListOfContainers(
       long startContainerID, int count, HddsProtos.LifeCycleState state)
       throws IOException;
+
+  DecommissionScmResponseProto decommissionScm(
+      String scmId) throws IOException;
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index c664a42ae661..79ea96693315 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -49,6 +49,8 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
@@ -1076,4 +1078,19 @@ public List getListOfContainers(
       throws IOException {
     return listContainer(startContainerID, count, state);
   }
+
+  @Override
+  public DecommissionScmResponseProto decommissionScm(
+      String scmId) throws IOException {
+
+    DecommissionScmRequestProto request = DecommissionScmRequestProto
+        .newBuilder()
+        .setScmId(scmId)
+        .build();
+    DecommissionScmResponseProto response =
+        submitRequest(Type.DecommissionScm,
+            builder -> builder.setDecommissionScmRequest(request))
+        .getDecommissionScmResponse();
+    return response;
+  }
 }
diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
index d5a3c6f65ac7..b4ffd37a249a 100644
--- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
+++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto
@@ -80,6 +80,7 @@ message ScmContainerLocationRequest {
   optional ResetDeletedBlockRetryCountRequestProto resetDeletedBlockRetryCountRequest = 41;
   optional TransferLeadershipRequestProto transferScmLeadershipRequest = 42;
   optional GetFailedDeletedBlocksTxnRequestProto getFailedDeletedBlocksTxnRequest = 43;
+  optional DecommissionScmRequestProto decommissionScmRequest = 44;
 }
 
 message ScmContainerLocationResponse {
@@ -131,6 +132,7 @@ message ScmContainerLocationResponse {
   optional ResetDeletedBlockRetryCountResponseProto resetDeletedBlockRetryCountResponse = 41;
   optional TransferLeadershipResponseProto transferScmLeadershipResponse = 42;
   optional GetFailedDeletedBlocksTxnResponseProto getFailedDeletedBlocksTxnResponse = 43;
+  optional DecommissionScmResponseProto decommissionScmResponse = 44;
 
   enum Status {
     OK = 1;
@@ -181,6 +183,7 @@
   GetClosedContainerCount = 37;
   TransferLeadership = 38;
   GetFailedDeletedBlocksTransaction = 39;
+  DecommissionScm = 40;
 }
 
 /**
@@ -573,6 +576,15 @@ message ContainerBalancerStatusResponseProto {
   required bool isRunning = 1;
 }
 
+message DecommissionScmRequestProto {
+  required string scmId = 1;
+}
+
+message DecommissionScmResponseProto {
+  required bool success = 1;
+  optional string errorMsg = 2;
+}
+
 /**
  * Protocol used from an HDFS node to StorageContainerManager. See the request
  * and response messages for details of the RPC calls.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 2dc3c5b04fa8..6bbe250a4ec7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -43,6 +43,8 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.FinalizeScmUpgradeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.FinalizeScmUpgradeResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
@@ -683,6 +685,13 @@ public ScmContainerLocationResponse processRequest(
           transferScmLeadership(
               request.getTransferScmLeadershipRequest()))
           .build();
+    case DecommissionScm:
+      return ScmContainerLocationResponse.newBuilder()
+          .setCmdType(request.getCmdType())
+          .setStatus(Status.OK)
+          .setDecommissionScmResponse(decommissionScm(
+              request.getDecommissionScmRequest()))
+          .build();
     default:
       throw new IllegalArgumentException(
           "Unknown command type: " + request.getCmdType());
@@ -1210,4 +1219,10 @@ public TransferLeadershipResponseProto transferScmLeadership(
     impl.transferLeadership(newLeaderId);
     return TransferLeadershipResponseProto.getDefaultInstance();
   }
+
+  public DecommissionScmResponseProto decommissionScm(
+      DecommissionScmRequestProto request) throws IOException {
+    return impl.decommissionScm(
+        request.getScmId());
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 6ee278b7a7eb..f62697fb7a5b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -39,6 +39,8 @@
 import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto.Builder;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
 import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB;
 import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB;
@@ -1324,4 +1326,21 @@ public List listReconfigureProperties() throws IOException {
   public void close() throws IOException {
     stop();
   }
+
+  @Override
+  public DecommissionScmResponseProto decommissionScm(
+      String scmId) {
+    Builder decommissionScmResponseBuilder =
+        DecommissionScmResponseProto.newBuilder();
+
+    try {
+      decommissionScmResponseBuilder
+          .setSuccess(scm.removePeerFromHARing(scmId));
+    } catch (IOException ex) {
+      decommissionScmResponseBuilder
+          .setSuccess(false)
+          .setErrorMsg(ex.getMessage());
+    }
+    return decommissionScmResponseBuilder.build();
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
new file mode 100644
index 000000000000..e6df13e924a8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.server;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmRequestProto;
+import org.apache.hadoop.hdds.scm.HddsTestUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
+import org.apache.ozone.test.GenericTestUtils;
+
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.mockito.Mockito;
+
+import java.io.File;
+
+/**
+ * Unit tests to validate the SCMClientProtocolServer
+ * servicing commands from the scm client.
+ */
+public class TestSCMClientProtocolServer {
+  private OzoneConfiguration config;
+  private SCMClientProtocolServer server;
+  private StorageContainerManager scm;
+  private StorageContainerLocationProtocolServerSideTranslatorPB service;
+
+  @BeforeEach
+  public void setUp() throws Exception {
+    config = new OzoneConfiguration();
+    File dir = GenericTestUtils.getRandomizedTestDir();
+    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
+    SCMConfigurator configurator = new SCMConfigurator();
+    configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true));
+    configurator.setScmContext(SCMContext.emptyContext());
+    scm = HddsTestUtils.getScm(config, configurator);
+    scm.start();
+    scm.exitSafeMode();
+
+    server = scm.getClientProtocolServer();
+    service = new StorageContainerLocationProtocolServerSideTranslatorPB(server,
+        scm, Mockito.mock(ProtocolMessageMetrics.class));
+  }
+
+  @AfterEach
+  public void tearDown() throws Exception {
+    if (scm != null) {
+      scm.stop();
+      scm.join();
+    }
+  }
+
+  /**
+   * Tests decommissioning of scm.
+   */
+  @Test
+  public void testScmDecommissionRemoveScmErrors() throws Exception {
+    String scmId = scm.getScmId();
+    String err = "Cannot remove current leader.";
+
+    DecommissionScmRequestProto request =
+        DecommissionScmRequestProto.newBuilder()
+            .setScmId(scmId)
+            .build();
+
+    DecommissionScmResponseProto resp =
+        service.decommissionScm(request);
+
+    // should have optional error message set in response
+    assertTrue(resp.hasErrorMsg());
+    assertTrue(resp.getErrorMsg()
+        .equals(err));
+  }
+}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 22050f194009..e8b657ecb192 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartContainerBalancerResponseProto;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -509,4 +510,12 @@ public StatusAndMessages queryUpgradeFinalizationProgress(
     return storageContainerLocationClient.queryUpgradeFinalizationProgress(
         upgradeClientID, force, readonly);
   }
+
+  @Override
+  public DecommissionScmResponseProto decommissionScm(
+      String scmId)
+      throws IOException {
+    return storageContainerLocationClient.decommissionScm(scmId);
+  }
+
 }
diff --git a/hadoop-ozone/dev-support/intellij/ozone-site.xml b/hadoop-ozone/dev-support/intellij/ozone-site.xml
index 4052c096104d..455509782e2f 100644
--- a/hadoop-ozone/dev-support/intellij/ozone-site.xml
+++ b/hadoop-ozone/dev-support/intellij/ozone-site.xml
@@ -15,6 +15,10 @@
   limitations under the License.
 -->
 <configuration>
+  <property>
+    <name>ozone.default.bucket.layout</name>
+    <value>LEGACY</value>
+  </property>
   <property>
     <name>hdds.profiler.endpoint.enabled</name>
     <value>true</value>
@@ -96,4 +100,4 @@
     <name>ozone.metastore.rocksdb.statistics</name>
     <value>ALL</value>
   </property>
-</configuration>
\ No newline at end of file
+</configuration>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DecommissionScmSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DecommissionScmSubcommand.java
new file mode 100644
index 000000000000..e1a77561d8ae
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DecommissionScmSubcommand.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import picocli.CommandLine;
+
+import java.io.IOException;
+
+/**
+ * Handler of ozone admin scm decommission command.
+ */
+@CommandLine.Command(
+    name = "decommission",
+    description = "Decommission SCM. Includes removing it from the Ratis " +
+        "ring and removing its certificate from certStore",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class)


+public class DecommissionScmSubcommand extends ScmSubcommand {
+  @CommandLine.ParentCommand
+  private ScmAdmin parent;
+
+  @CommandLine.Option(names = {"-nodeid", "--nodeid"},
+      description = "NodeID of the SCM to be decommissioned.",
+      required = true)
+  private String nodeId;
+
+  @Override
+  public void execute(ScmClient scmClient) throws IOException {
+    DecommissionScmResponseProto response = scmClient.decommissionScm(nodeId);
+    if (!response.getSuccess()) {
+      System.out.println("Error decommissioning Scm " + nodeId);
+      if (response.hasErrorMsg()) {
+        System.out.println(response.getErrorMsg());
+      }
+    } else {
+      System.out.println("Decommissioned Scm " + nodeId);
+    }
+  }
+}
+
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
index a7f96dee84b3..fbc5a3b52b00 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
@@ -39,7 +39,8 @@
     FinalizeScmUpgradeSubcommand.class,
     FinalizationScmStatusSubcommand.class,
     TransferScmLeaderSubCommand.class,
-    DeletedBlocksTxnCommands.class
+    DeletedBlocksTxnCommands.class,
+    DecommissionScmSubcommand.class
 })
 @MetaInfServices(SubcommandWithParent.class)
 public class ScmAdmin extends GenericCli implements SubcommandWithParent {
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java
new file mode 100644
index 000000000000..794a99e5271c
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.scm;
+
+import org.apache.hadoop.hdds.cli.OzoneAdmin;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionScmResponseProto;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.ozone.admin.scm.DecommissionScmSubcommand;
+import org.apache.ozone.test.GenericTestUtils;
+
+import java.util.UUID;
+
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import picocli.CommandLine;
+
+/**
+ * Unit tests to validate that DecommissionScmSubcommand produces the
+ * correct output when executed against a mock client.
+ */
+public class TestDecommissionScmSubcommand {
+
+  @Test
+  public void testDecommissionScmInputParams() throws Exception {
+    // requires String
+    DecommissionScmSubcommand cmd = new DecommissionScmSubcommand();
+    ScmClient client = mock(ScmClient.class);
+    OzoneAdmin admin = new OzoneAdmin();
+
+    try (GenericTestUtils.SystemErrCapturer capture =
+        new GenericTestUtils.SystemErrCapturer()) {
+      String[] args = {"scm", "decommission"};
+      admin.execute(args);
+      assertTrue(capture.getOutput().contains(
+          "Usage: ozone admin scm decommission"));
+    }
+
+    // now give required String
+    CommandLine c1 = new CommandLine(cmd);
+    String scmId = UUID.randomUUID().toString();
+    c1.parseArgs("--nodeid=" + scmId);
+
+    DecommissionScmResponseProto response =
+        DecommissionScmResponseProto.newBuilder()
+            .setSuccess(true)
+            .build();
+
+    Mockito.when(client.decommissionScm(any()))
+        .thenAnswer(invocation -> (
+            response));
+
+    try (GenericTestUtils.SystemOutCapturer capture =
+        new GenericTestUtils.SystemOutCapturer()) {
+      cmd.execute(client);
+      assertTrue(capture.getOutput().contains(
+          scmId));
+    }
+  }
+
+  @Test
+  public void testDecommissionScmScmRemoveErrors() throws Exception {
+    // requires String
+    DecommissionScmSubcommand cmd = new DecommissionScmSubcommand();
+    ScmClient client = mock(ScmClient.class);
+
+    CommandLine c1 = new CommandLine(cmd);
+    String scmId = UUID.randomUUID().toString();
+    c1.parseArgs("--nodeid=" + scmId);
+
+    DecommissionScmResponseProto response =
+        DecommissionScmResponseProto.newBuilder()
+            .setSuccess(false)
+            .setErrorMsg("Removal of primordial node is not supported")
+            .build();
+
+    Mockito.when(client.decommissionScm(any()))
+        .thenAnswer(invocation -> (
+            response));
+
+    try (GenericTestUtils.SystemOutCapturer capture =
+        new GenericTestUtils.SystemOutCapturer()) {
+      cmd.execute(client);
+      assertTrue(capture.getOutput().contains(
+          "Removal of primordial"));
+    }
+  }
+
+  // TODO: test decommission revoke certificate
+
+}
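Illustrative client-side usage of the API introduced by this patch (a sketch, not part of the diff): any ScmClient implementation, such as the ContainerOperationClient wired up above, can call the new decommissionScm method and inspect the optional errorMsg field of DecommissionScmResponseProto; this is also what the new `ozone admin scm decommission --nodeid=<scm-id>` subcommand does. The variable names below (scmClient, scmId) are hypothetical.

    // Sketch: invoke the new ScmClient#decommissionScm API and report the outcome.
    // Assumes a connected ScmClient instance (scmClient) and a target SCM node id (scmId).
    DecommissionScmResponseProto response = scmClient.decommissionScm(scmId);
    if (!response.getSuccess()) {
      // errorMsg is optional in DecommissionScmResponseProto, so check before reading it.
      System.err.println("SCM decommission failed: "
          + (response.hasErrorMsg() ? response.getErrorMsg() : "unknown error"));
    } else {
      System.out.println("Decommissioned SCM " + scmId);
    }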