From 1daa2c41a505359424cbb63e5c2443723c1fa877 Mon Sep 17 00:00:00 2001 From: HuangTao Date: Wed, 5 Aug 2020 09:21:38 +0800 Subject: [PATCH 1/7] HDDS-4039. Reduce the number of fields in hdds.proto to improve performance --- .../hadoop/hdds/protocol/DatanodeDetails.java | 426 +++++++++++------- .../hdds/protocol/MockDatanodeDetails.java | 15 + .../hadoop/ozone/HddsDatanodeService.java | 18 +- .../statemachine/DatanodeStateMachine.java | 4 +- .../states/datanode/RunningDatanodeState.java | 2 +- .../states/endpoint/RegisterEndpointTask.java | 7 +- .../StorageContainerDatanodeProtocol.java | 15 +- .../StorageContainerNodeProtocol.java | 2 +- ...atanodeProtocolClientSideTranslatorPB.java | 10 +- ...atanodeProtocolServerSideTranslatorPB.java | 3 +- .../ozone/container/common/ScmTestMock.java | 5 +- .../src/main/proto/hdds.proto | 16 +- .../src/main/resources/proto.lock | 35 +- .../ScmServerDatanodeHeartbeatProtocol.proto | 12 +- .../src/main/resources/proto.lock | 9 +- .../hadoop/hdds/scm/node/DatanodeInfo.java | 2 +- .../hadoop/hdds/scm/node/NodeManager.java | 4 +- .../hdds/scm/node/NodeStateManager.java | 74 +-- .../hadoop/hdds/scm/node/SCMNodeManager.java | 4 +- .../hdds/scm/node/states/NodeStateMap.java | 4 +- .../scm/server/SCMDatanodeProtocolServer.java | 5 + .../ozone/container/common/TestEndPoint.java | 5 +- .../hadoop/ozone/recon/api/NodeEndpoint.java | 8 +- .../hadoop/ozone/recon/api/TestEndpoints.java | 91 ++-- 24 files changed, 491 insertions(+), 285 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index a3db139b96ce..b62ae27c98ff 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -35,6 +35,9 @@ * - UUID of the DataNode. * - IP and Hostname details. * - Port details to which the DataNode will be listening. + * and may also include some extra info like: + * - version of the DataNode + * - setup time etc. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -49,10 +52,7 @@ public class DatanodeDetails extends NodeImpl implements private String hostName; private List ports; private String certSerialId; - private String version; - private long setupTime; - private String revision; - private String buildDate; + private ExtraDatanodeDetails extraDatanodeDetails; /** * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used @@ -63,25 +63,15 @@ public class DatanodeDetails extends NodeImpl implements * @param networkLocation DataNode's network location path * @param ports Ports used by the DataNode * @param certSerialId serial id from SCM issued certificate. 
- * @param version DataNode's version - * @param setupTime the setup time of DataNode - * @param revision DataNodes's revision - * @param buildDate DataNodes's build timestamp */ - @SuppressWarnings("parameternumber") - private DatanodeDetails(UUID uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId, - String version, long setupTime, String revision, String buildDate) { + protected DatanodeDetails(UUID uuid, String ipAddress, String hostName, + String networkLocation, List ports, String certSerialId) { super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); this.uuid = uuid; this.ipAddress = ipAddress; this.hostName = hostName; this.ports = ports; this.certSerialId = certSerialId; - this.version = version; - this.setupTime = setupTime; - this.revision = revision; - this.buildDate = buildDate; } public DatanodeDetails(DatanodeDetails datanodeDetails) { @@ -93,10 +83,9 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.ports = datanodeDetails.ports; this.setNetworkName(datanodeDetails.getNetworkName()); this.setParent(datanodeDetails.getParent()); - this.version = datanodeDetails.version; - this.setupTime = datanodeDetails.setupTime; - this.revision = datanodeDetails.revision; - this.buildDate = datanodeDetails.buildDate; + if (datanodeDetails.getExtraDatanodeDetails() != null) { + this.extraDatanodeDetails = datanodeDetails.getExtraDatanodeDetails(); + } } /** @@ -190,6 +179,25 @@ public Port getPort(Port.Name name) { return null; } + /** + * Returns DataNode extended fields. + * + * @return ExtraDatanodeDetails + */ + public ExtraDatanodeDetails getExtraDatanodeDetails() { + return extraDatanodeDetails; + } + + /** + * Sets a DataNode extended fields. + * + * @param extraDatanodeDetails DataNode extended fields + */ + public void setExtraDatanodeDetails( + ExtraDatanodeDetails extraDatanodeDetails) { + this.extraDatanodeDetails = extraDatanodeDetails; + } + /** * Returns a DatanodeDetails from the protocol buffers. * @@ -225,18 +233,6 @@ public static DatanodeDetails getFromProtoBuf( if (datanodeDetailsProto.hasNetworkLocation()) { builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); } - if (datanodeDetailsProto.hasVersion()) { - builder.setVersion(datanodeDetailsProto.getVersion()); - } - if (datanodeDetailsProto.hasSetupTime()) { - builder.setSetupTime(datanodeDetailsProto.getSetupTime()); - } - if (datanodeDetailsProto.hasRevision()) { - builder.setRevision(datanodeDetailsProto.getRevision()); - } - if (datanodeDetailsProto.hasBuildDate()) { - builder.setBuildDate(datanodeDetailsProto.getBuildDate()); - } return builder.build(); } @@ -279,19 +275,6 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { .build()); } - if (!Strings.isNullOrEmpty(getVersion())) { - builder.setVersion(getVersion()); - } - - builder.setSetupTime(getSetupTime()); - - if (!Strings.isNullOrEmpty(getRevision())) { - builder.setRevision(getRevision()); - } - if (!Strings.isNullOrEmpty(getBuildDate())) { - builder.setBuildDate(getBuildDate()); - } - return builder.build(); } @@ -344,16 +327,13 @@ public static final class Builder { private String networkLocation; private List ports; private String certSerialId; - private String version; - private long setupTime; - private String revision; - private String buildDate; + private ExtraDatanodeDetails extraDatanodeDetails; /** * Default private constructor. To create Builder instance use * DatanodeDetails#newBuilder. 
*/ - private Builder() { + protected Builder() { ports = new ArrayList<>(); } @@ -437,50 +417,15 @@ public Builder setCertSerialId(String certId) { } /** - * Sets the DataNode version. - * - * @param ver the version of DataNode. - * - * @return DatanodeDetails.Builder - */ - public Builder setVersion(String ver) { - this.version = ver; - return this; - } - - /** - * Sets the DataNode revision. + * Adds extra DataNode info. * - * @param rev the revision of DataNode. + * @param extraDatanodeDetails extra DataNode info. * * @return DatanodeDetails.Builder */ - public Builder setRevision(String rev) { - this.revision = rev; - return this; - } - - /** - * Sets the DataNode build date. - * - * @param date the build date of DataNode. - * - * @return DatanodeDetails.Builder - */ - public Builder setBuildDate(String date) { - this.buildDate = date; - return this; - } - - /** - * Sets the DataNode setup time. - * - * @param time the setup time of DataNode. - * - * @return DatanodeDetails.Builder - */ - public Builder setSetupTime(long time) { - this.setupTime = time; + public Builder setExtraDatanodeDetails( + ExtraDatanodeDetails extraDatanodeDetails) { + this.extraDatanodeDetails = extraDatanodeDetails; return this; } @@ -495,11 +440,13 @@ public DatanodeDetails build() { networkLocation = NetConstants.DEFAULT_RACK; } DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId, - version, setupTime, revision, buildDate); + networkLocation, ports, certSerialId); if (networkName != null) { dn.setNetworkName(networkName); } + if (extraDatanodeDetails != null) { + dn.setExtraDatanodeDetails(extraDatanodeDetails); + } return dn; } } @@ -604,74 +551,251 @@ public void setCertSerialId(String certSerialId) { } /** - * Returns the DataNode version. + * Constructs ExtraDatanodeDetails instance. * - * @return DataNode version - */ - public String getVersion() { - return version; - } - - /** - * Set DataNode version. + * @param version DataNode's version + * @param setupTime the setup time of DataNode + * @param revision DataNodes's revision + * @param buildDate DataNodes's build timestamp * - * @param version DataNode version + * @return ExtraDatanodeDetails instance */ - public void setVersion(String version) { - this.version = version; + public static ExtraDatanodeDetails newExtraDatanodeDetails( + String version, long setupTime, String revision, String buildDate) { + return new ExtraDatanodeDetails(version, setupTime, revision, buildDate); } - /** - * Returns the DataNode setup time. - * - * @return DataNode setup time - */ - public long getSetupTime() { - return setupTime; - } + public static final class ExtraDatanodeDetails { + private String version; + private long setupTime; + private String revision; + private String buildDate; - /** - * Set DataNode setup time. - * - * @param setupTime DataNode setup time - */ - public void setSetupTime(long setupTime) { - this.setupTime = setupTime; - } + /** + * Constructs ExtraDatanodeDetails instance. + * @param version DataNode's version + * @param setupTime the setup time of DataNode + * @param revision DataNodes's revision + * @param buildDate DataNodes's build timestamp + */ + public ExtraDatanodeDetails(String version, long setupTime, + String revision, String buildDate) { + this.version = version; + this.setupTime = setupTime; + this.revision = revision; + this.buildDate = buildDate; + } - /** - * Returns the DataNode revision. 
-   *
-   * @return DataNode revision
-   */
-  public String getRevision() {
-    return revision;
-  }
+    public ExtraDatanodeDetails(ExtraDatanodeDetails
+        other) {
+      this.version = other.version;
+      this.setupTime = other.setupTime;
+      this.revision = other.revision;
+      this.buildDate = other.buildDate;
+    }
-  /**
-   * Set DataNode revision.
-   *
-   * @param rev DataNode revision
-   */
-  public void setRevision(String rev) {
-    this.revision = rev;
-  }
+    /**
+     * Returns the DataNode version.
+     *
+     * @return DataNode version
+     */
+    public String getVersion() {
+      return version;
+    }
-  /**
-   * Returns the DataNode build date.
-   *
-   * @return DataNode build date
-   */
-  public String getBuildDate() {
-    return buildDate;
-  }
+    /**
+     * Set DataNode version.
+     *
+     * @param version DataNode version
+     */
+    public void setVersion(String version) {
+      this.version = version;
+    }
-  /**
-   * Set DataNode build date.
-   *
-   * @param date DataNode build date
-   */
-  public void setBuildDate(String date) {
-    this.buildDate = date;
+    /**
+     * Returns the DataNode setup time.
+     *
+     * @return DataNode setup time
+     */
+    public long getSetupTime() {
+      return setupTime;
+    }
+
+    /**
+     * Set DataNode setup time.
+     *
+     * @param setupTime DataNode setup time
+     */
+    public void setSetupTime(long setupTime) {
+      this.setupTime = setupTime;
+    }
+
+    /**
+     * Returns the DataNode revision.
+     *
+     * @return DataNode revision
+     */
+    public String getRevision() {
+      return revision;
+    }
+
+    /**
+     * Set DataNode revision.
+     *
+     * @param rev DataNode revision
+     */
+    public void setRevision(String rev) {
+      this.revision = rev;
+    }
+
+    /**
+     * Returns the DataNode build date.
+     *
+     * @return DataNode build date
+     */
+    public String getBuildDate() {
+      return buildDate;
+    }
+
+    /**
+     * Set DataNode build date.
+     *
+     * @param date DataNode build date
+     */
+    public void setBuildDate(String date) {
+      this.buildDate = date;
+    }
+
+    /**
+     * Returns an ExtraDatanodeDetails from the protocol buffers.
+     *
+     * @param extraDatanodeDetailsProto - protoBuf Message
+     * @return ExtraDatanodeDetails
+     */
+    public static ExtraDatanodeDetails getFromProtoBuf(
+        HddsProtos.ExtraDatanodeDetailsProto extraDatanodeDetailsProto) {
+      Builder builder = newBuilder();
+      if (extraDatanodeDetailsProto.hasVersion()) {
+        builder.setVersion(extraDatanodeDetailsProto.getVersion());
+      }
+      if (extraDatanodeDetailsProto.hasSetupTime()) {
+        builder.setSetupTime(extraDatanodeDetailsProto.getSetupTime());
+      }
+      if (extraDatanodeDetailsProto.hasRevision()) {
+        builder.setRevision(extraDatanodeDetailsProto.getRevision());
+      }
+      if (extraDatanodeDetailsProto.hasBuildDate()) {
+        builder.setBuildDate(extraDatanodeDetailsProto.getBuildDate());
+      }
+      return builder.build();
+    }
+
+    /**
+     * Returns an ExtraDatanodeDetailsProto protobuf message.
+     * @return HddsProtos.ExtraDatanodeDetailsProto
+     */
+    public HddsProtos.ExtraDatanodeDetailsProto getProtoBufMessage() {
+      HddsProtos.ExtraDatanodeDetailsProto.Builder builder =
+          HddsProtos.ExtraDatanodeDetailsProto.newBuilder();
+      if (!Strings.isNullOrEmpty(getVersion())) {
+        builder.setVersion(getVersion());
+      }
+
+      builder.setSetupTime(getSetupTime());
+
+      if (!Strings.isNullOrEmpty(getRevision())) {
+        builder.setRevision(getRevision());
+      }
+      if (!Strings.isNullOrEmpty(getBuildDate())) {
+        builder.setBuildDate(getBuildDate());
+      }
+
+      return builder.build();
+    }
+
+    /**
+     * Returns ExtraDatanodeDetails.Builder instance.
+ * + * @return ExtraDatanodeDetails.Builder + */ + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder class for building DatanodeDetails. + */ + public static final class Builder { + private String version; + private long setupTime; + private String revision; + private String buildDate; + + /** + * Default private constructor. To create Builder instance use + * ExtraDatanodeDetails#newBuilder. + */ + private Builder() { } + + /** + * Sets the DataNode version. + * + * @param ver the version of DataNode. + * + * @return ExtraDatanodeDetails.Builder + */ + public Builder setVersion(String ver) { + this.version = ver; + return this; + } + + /** + * Sets the DataNode revision. + * + * @param rev the revision of DataNode. + * + * @return ExtraDatanodeDetails.Builder + */ + public Builder setRevision(String rev) { + this.revision = rev; + return this; + } + + /** + * Sets the DataNode build date. + * + * @param date the build date of DataNode. + * + * @return ExtraDatanodeDetails.Builder + */ + public Builder setBuildDate(String date) { + this.buildDate = date; + return this; + } + + /** + * Sets the DataNode setup time. + * + * @param time the setup time of DataNode. + * + * @return ExtraDatanodeDetails.Builder + */ + public Builder setSetupTime(long time) { + this.setupTime = time; + return this; + } + + /** + * Builds and returns DatanodeDetails instance. + * + * @return ExtraDatanodeDetails + */ + public ExtraDatanodeDetails build() { + ExtraDatanodeDetails dn = new ExtraDatanodeDetails( + version, setupTime, revision, buildDate); + return dn; + } + } } + } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java index 579271943c35..27c85eea9d26 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java @@ -22,6 +22,8 @@ import java.util.Random; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.hdds.protocol.DatanodeDetails.ExtraDatanodeDetails; +import org.apache.hadoop.util.Time; /** * Provides {@link DatanodeDetails} factory methods for testing. 
@@ -87,6 +89,18 @@ public static DatanodeDetails createDatanodeDetails(String uuid,
   public static DatanodeDetails createDatanodeDetails(String uuid,
       String hostname, String ipAddress, String networkLocation, int port) {
+    ExtraDatanodeDetails extraDatanodeDetails =
+        DatanodeDetails.newExtraDatanodeDetails(
+            "0.6.0", Time.now(),
+            "1346f493fa1690358add7bb9f3e5b52545993f36",
+            "2020-08-01T16:19Z");
+    return createDatanodeDetails(uuid, hostname, ipAddress,
+        networkLocation, port, extraDatanodeDetails);
+  }
+
+  public static DatanodeDetails createDatanodeDetails(String uuid,
+      String hostname, String ipAddress, String networkLocation, int port,
+      ExtraDatanodeDetails extraDatanodeDetails) {
     DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
         DatanodeDetails.Port.Name.STANDALONE, port);
     DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
@@ -101,6 +115,7 @@ public static DatanodeDetails createDatanodeDetails(String uuid,
         .addPort(ratisPort)
         .addPort(restPort)
         .setNetworkLocation(networkLocation)
+        .setExtraDatanodeDetails(extraDatanodeDetails)
         .build();
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index cfb22e30dcd2..9170ff44a23f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails.ExtraDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
@@ -202,18 +203,19 @@ public void start() {
     try {
       String hostname = HddsUtils.getHostName(conf);
       String ip = InetAddress.getByName(hostname).getHostAddress();
+      datanodeDetails = initializeDatanodeDetails();
       datanodeDetails.setHostName(hostname);
       datanodeDetails.setIpAddress(ip);
-      datanodeDetails.setVersion(
-          HddsVersionInfo.HDDS_VERSION_INFO.getVersion());
-      datanodeDetails.setSetupTime(Time.now());
-      datanodeDetails.setRevision(
-          HddsVersionInfo.HDDS_VERSION_INFO.getRevision());
-      datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate());
+      ExtraDatanodeDetails extraDatanodeDetails = new ExtraDatanodeDetails(
+          HddsVersionInfo.HDDS_VERSION_INFO.getVersion(),
+          Time.now(),
+          HddsVersionInfo.HDDS_VERSION_INFO.getRevision(),
+          HddsVersionInfo.HDDS_VERSION_INFO.getDate());
       TracingUtil.initTracing(
           "HddsDatanodeService."
+ datanodeDetails.getUuidString() .substring(0, 8), conf); + datanodeDetails.setExtraDatanodeDetails(extraDatanodeDetails); LOG.info("HddsDatanodeService host:{} ip:{}", hostname, ip); // Authenticate Hdds Datanode service if security is enabled if (OzoneSecurityUtil.isSecurityEnabled(conf)) { @@ -245,8 +247,8 @@ public void start() { if (OzoneSecurityUtil.isSecurityEnabled(conf)) { initializeCertificateClient(conf); } - datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf, - dnCertClient, this::terminateDatanode); + datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, + conf, dnCertClient, this::terminateDatanode); try { httpServer = new HddsDatanodeHttpServer(conf); httpServer.start(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 425074d6888a..1951c8e097c5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -90,7 +90,7 @@ public class DatanodeStateMachine implements Closeable { /** * Constructs a a datanode state machine. - * @param datanodeDetails - DatanodeDetails used to identify a datanode + * @param datanodeDetails - DatanodeDetails used to identify a datanode * @param conf - Configuration. * @param certClient - Datanode Certificate client, required if security is * enabled @@ -135,7 +135,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, dnConf.getReplicationMaxStreams()); // When we add new handlers just adding a new handler here should do the - // trick. + // trick. commandDispatcher = CommandDispatcher.newBuilder() .addHandler(new CloseContainerCommandHandler()) .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java index b0cfb4ce001a..45f510fd25d5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java @@ -178,7 +178,7 @@ private Callable getEndPointTask( * @return next container state. 
*/ private DatanodeStateMachine.DatanodeStates - computeNextContainerState( + computeNextContainerState( List> results) { for (Future state : results) { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index be95f011407c..3ec508c9ca9a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; + .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.ozone.container.common.statemachine .EndpointStateMachine; import org.apache.hadoop.hdds.protocol.proto @@ -119,8 +119,9 @@ public EndpointStateMachine.EndPointStates call() throws Exception { datanodeContainerManager.getPipelineReport(); // TODO : Add responses to the command Queue. SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint() - .register(datanodeDetails.getProtoBufMessage(), nodeReport, - containerReport, pipelineReportsProto); + .register(datanodeDetails.getProtoBufMessage(), + datanodeDetails.getExtraDatanodeDetails().getProtoBufMessage(), + nodeReport, containerReport, pipelineReportsProto); Preconditions.checkState(UUID.fromString(response.getDatanodeUUID()) .equals(datanodeDetails.getUuid()), "Unexpected datanode ID in the response."); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java index b62f712bc769..6e99c3a6bbe1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java @@ -18,8 +18,9 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtraDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; + .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; import org.apache.hadoop.hdds.protocol.proto @@ -53,7 +54,7 @@ public interface StorageContainerDatanodeProtocol { /** * Version 1: Initial version. */ - long versionID = 1L; + long versionID = 1L; /** * Returns SCM version. @@ -74,14 +75,16 @@ SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat) /** * Register Datanode. * @param datanodeDetails - Datanode Details. + * @param extraDatanodeDetails - Datanode more details. * @param nodeReport - Node Report. * @param containerReportsRequestProto - Container Reports. * @return SCM Command. 
*/ SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetails, - NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReports) throws IOException; + DatanodeDetailsProto datanodeDetails, + ExtraDatanodeDetailsProto extraDatanodeDetails, + NodeReportProto nodeReport, + ContainerReportsProto containerReportsRequestProto, + PipelineReportsProto pipelineReports) throws IOException; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java index cb55880c5c96..718f12af83b6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java @@ -51,7 +51,7 @@ public interface StorageContainerNodeProtocol { /** * Register the node if the node finds that it is not registered with any SCM. - * @param datanodeDetails DatanodeDetails + * @param datanodeDetails ExtendedDatanodeDetails * @param nodeReport NodeReportProto * @param pipelineReport PipelineReportsProto * @return SCMRegisteredResponseProto diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index 9b446666e5d1..63a69cd7e170 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -19,8 +19,10 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos + .ExtraDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; + .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; @@ -154,19 +156,23 @@ public SCMHeartbeatResponseProto sendHeartbeat( * Register Datanode. * * @param datanodeDetailsProto - Datanode Details + * @param extraDatanodeDetails - Datanode more details. * @param nodeReport - Node Report. * @param containerReportsRequestProto - Container Reports. * @return SCM Command. 
*/ @Override public SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport, + DatanodeDetailsProto datanodeDetailsProto, + ExtraDatanodeDetailsProto extraDatanodeDetails, + NodeReportProto nodeReport, ContainerReportsProto containerReportsRequestProto, PipelineReportsProto pipelineReportsProto) throws IOException { SCMRegisterRequestProto.Builder req = SCMRegisterRequestProto.newBuilder(); req.setDatanodeDetails(datanodeDetailsProto); + req.setExtraDatanodeDetails(extraDatanodeDetails); req.setContainerReport(containerReportsRequestProto); req.setPipelineReports(pipelineReportsProto); req.setNodeReport(nodeReport); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java index e99cbae9f027..1d2d063eccd3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java @@ -68,7 +68,8 @@ public SCMRegisteredResponseProto register( .getContainerReport(); NodeReportProto dnNodeReport = request.getNodeReport(); PipelineReportsProto pipelineReport = request.getPipelineReports(); - return impl.register(request.getDatanodeDetails(), dnNodeReport, + return impl.register(request.getDatanodeDetails(), + request.getExtraDatanodeDetails(), dnNodeReport, containerRequestProto, pipelineReport); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index c4b29ba2722d..902677857a93 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtraDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -217,7 +218,9 @@ private void sleepIfNeeded() { @Override public StorageContainerDatanodeProtocolProtos .SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport, + DatanodeDetailsProto datanodeDetailsProto, + ExtraDatanodeDetailsProto extraDatanodeDetailsProto, + NodeReportProto nodeReport, ContainerReportsProto containerReportsRequestProto, PipelineReportsProto pipelineReportsProto) throws IOException { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 3eeb3a321927..904211788332 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -43,14 +43,22 @@ message DatanodeDetailsProto { // network name, can be Ip address or host name, depends optional string networkName = 6; optional string networkLocation = 7; // Network topology location - 
optional string version = 8; // Datanode version - optional int64 setupTime = 9; - optional string revision = 10; - optional string buildDate = 11; // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. } +/** + The DatanodeDetailsProto is a basic type that will be shared by many Proto, + and we just need some more detail information to register with SCM, so I use + ExtraDatanodeDetailsProto to extend DatanodeDetailsProto for registration. +*/ +message ExtraDatanodeDetailsProto { + optional string version = 1; // Datanode version + optional int64 setupTime = 2; + optional string revision = 3; + optional string buildDate = 4; +} + /** Proto message encapsulating information required to uniquely identify a OzoneManager. diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock index b27896c655e3..34f6d0ce213a 100644 --- a/hadoop-hdds/interface-client/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock @@ -1530,16 +1530,6 @@ "name": "networkLocation", "type": "string" }, - { - "id": 8, - "name": "version", - "type": "string" - }, - { - "id": 9, - "name": "setupTime", - "type": "int64" - }, { "id": 100, "name": "uuid128", @@ -1547,6 +1537,31 @@ } ] }, + { + "name": "ExtraDatanodeDetailsProto", + "fields": [ + { + "id": 1, + "name": "version", + "type": "string" + }, + { + "id": 2, + "name": "setupTime", + "type": "int64" + }, + { + "id": 3, + "name": "revision", + "type": "string" + }, + { + "id": 4, + "name": "buildDate", + "type": "string" + } + ] + }, { "name": "OzoneManagerDetailsProto", "fields": [ diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index 00c8fdbf3fb4..d0f2dd412d1f 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -89,9 +89,15 @@ message SCMVersionResponseProto { message SCMRegisterRequestProto { required DatanodeDetailsProto datanodeDetails = 1; - required NodeReportProto nodeReport = 2; - required ContainerReportsProto containerReport = 3; - required PipelineReportsProto pipelineReports = 4; + required ExtraDatanodeDetailsProto extraDatanodeDetails = 2; + required NodeReportProto nodeReport = 3; + required ContainerReportsProto containerReport = 4; + required PipelineReportsProto pipelineReports = 5; +} + +message ExtendedDatanodeDetailsProto { + required DatanodeDetailsProto datanodeDetails = 1; + required ExtraDatanodeDetailsProto extraDatanodeDetails = 2; } /** diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock index 5492e00b10a3..9e9ea78d495c 100644 --- a/hadoop-hdds/interface-server/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock @@ -315,16 +315,21 @@ }, { "id": 2, + "name": "extraDatanodeDetails", + "type": "ExtraDatanodeDetailsProto" + }, + { + "id": 3, "name": "nodeReport", "type": "NodeReportProto" }, { - "id": 3, + "id": 4, "name": "containerReport", "type": "ContainerReportsProto" }, { - "id": 4, + "id": 5, "name": "pipelineReports", "type": "PipelineReportsProto" } diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index b39440f41f99..5eefa4f3f6c3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -127,7 +127,7 @@ public int getHealthyVolumeCount() { */ private int getFailedVolumeCount() { return (int) storageReports.stream(). - filter(e -> e.hasFailed() ? e.getFailed() : false).count(); + filter(e -> e.hasFailed() ? e.getFailed() : false).count(); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index df21b84eafda..f120b3b6d4c5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -146,7 +146,7 @@ public interface NodeManager extends StorageContainerNodeProtocol, * use addDatanodeInContainerMap call. */ void addContainer(DatanodeDetails datanodeDetails, - ContainerID containerId) throws NodeNotFoundException; + ContainerID containerId) throws NodeNotFoundException; /** * Remaps datanode to containers mapping to the new set of containers. @@ -181,7 +181,7 @@ Set getContainers(DatanodeDetails datanodeDetails) * @param nodeReport */ void processNodeReport(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport); + NodeReportProto nodeReport); /** * Get list of SCMCommands in the Command Queue for a particular Datanode. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index b6248aa817d0..dc43d164f701 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -481,7 +481,7 @@ public void removePipeline(Pipeline pipeline) { * use addDatanodeInContainerMap call. */ public void addContainer(final UUID uuid, - final ContainerID containerId) + final ContainerID containerId) throws NodeNotFoundException { nodeStateMap.addContainer(uuid, containerId); } @@ -564,18 +564,18 @@ private void checkNodesHealth() { * >>-->> time-line >>-->> * * Here is the logic of computing the health of a node. -     * -     * 1. We get the current time and look back that the time -     *  when we got a heartbeat from a node. -     *  -     * 2. If the last heartbeat was within the window of healthy node we mark -     *  it as healthy. -     *  -     * 3. If the last HB Time stamp is longer and falls within the window of -     *  Stale Node time, we will mark it as Stale. -     *  -     * 4. If the last HB time is older than the Stale Window, then the node is -     * marked as dead. + * + * 1. We get the current time and look back that the time + *  when we got a heartbeat from a node. + * + * 2. If the last heartbeat was within the window of healthy node we mark + *  it as healthy. + * + * 3. If the last HB Time stamp is longer and falls within the window of + *  Stale Node time, we will mark it as Stale. + * + * 4. If the last HB time is older than the Stale Window, then the node is + * marked as dead. 
* * The Processing starts from current time and looks backwards in time. */ @@ -598,33 +598,33 @@ private void checkNodesHealth() { for (UUID id : nodes) { DatanodeInfo node = nodeStateMap.getNodeInfo(id); switch (state) { - case HEALTHY: - // Move the node to STALE if the last heartbeat time is less than - // configured stale-node interval. - updateNodeState(node, staleNodeCondition, state, + case HEALTHY: + // Move the node to STALE if the last heartbeat time is less than + // configured stale-node interval. + updateNodeState(node, staleNodeCondition, state, + NodeLifeCycleEvent.TIMEOUT); + break; + case STALE: + // Move the node to DEAD if the last heartbeat time is less than + // configured dead-node interval. + updateNodeState(node, deadNodeCondition, state, NodeLifeCycleEvent.TIMEOUT); - break; - case STALE: - // Move the node to DEAD if the last heartbeat time is less than - // configured dead-node interval. - updateNodeState(node, deadNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - // Restore the node if we have received heartbeat before configured - // stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESTORE); - break; - case DEAD: - // Resurrect the node if we have received heartbeat before - // configured stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESURRECT); - break; + // Restore the node if we have received heartbeat before configured + // stale-node interval. + updateNodeState(node, healthyNodeCondition, state, + NodeLifeCycleEvent.RESTORE); + break; + case DEAD: + // Resurrect the node if we have received heartbeat before + // configured stale-node interval. + updateNodeState(node, healthyNodeCondition, state, + NodeLifeCycleEvent.RESURRECT); + break; // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in // heartbeat processing. - case DECOMMISSIONING: - case DECOMMISSIONED: - default: + case DECOMMISSIONING: + case DECOMMISSIONED: + default: } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 1a0cec3b2176..b17d9f446403 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -520,10 +520,10 @@ public int getNumHealthyVolumes(List dnList) { for (DatanodeDetails dn : dnList) { try { volumeCountList.add(nodeStateManager.getNode(dn). 
-            getHealthyVolumeCount());
+          getHealthyVolumeCount());
       } catch (NodeNotFoundException e) {
         LOG.warn("Cannot generate NodeStat, datanode {} not found.",
-                 dn.getUuid());
+            dn.getUuid());
       }
     }
     Preconditions.checkArgument(!volumeCountList.isEmpty());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
index baebef5ccf87..02151ab9b337 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -105,7 +105,7 @@ public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState)
    * @throws NodeNotFoundException if the node is not present
    */
   public void updateNodeState(UUID nodeId, NodeState currentState,
-                              NodeState newState)throws NodeNotFoundException {
+      NodeState newState)throws NodeNotFoundException {
     lock.writeLock().lock();
     try {
       checkIfNodeExist(nodeId);
@@ -234,7 +234,7 @@ public NodeState getNodeState(UUID uuid) throws NodeNotFoundException {
    * use addDatanodeInContainerMap call.
    */
   public void addContainer(final UUID uuid,
-                           final ContainerID containerId)
+      final ContainerID containerId)
       throws NodeNotFoundException {
     lock.writeLock().lock();
     try {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index ad7f65ab5853..8c0d601e87ac 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails.ExtraDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -204,12 +205,16 @@ public SCMVersionResponseProto getVersion(SCMVersionRequestProto
   @Override
   public SCMRegisteredResponseProto register(
       HddsProtos.DatanodeDetailsProto datanodeDetailsProto,
+      HddsProtos.ExtraDatanodeDetailsProto extraDatanodeDetailsProto,
       NodeReportProto nodeReport,
       ContainerReportsProto containerReportsProto,
       PipelineReportsProto pipelineReportsProto) throws IOException {
     DatanodeDetails datanodeDetails = DatanodeDetails
         .getFromProtoBuf(datanodeDetailsProto);
+    ExtraDatanodeDetails extraDatanodeDetails = ExtraDatanodeDetails
+        .getFromProtoBuf(extraDatanodeDetailsProto);
+    datanodeDetails.setExtraDatanodeDetails(extraDatanodeDetails);
     boolean auditSuccess = true;
     Map<String, String> auditMap = Maps.newHashMap();
     auditMap.put("datanodeDetails", datanodeDetails.toString());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 663ac8c5b80a..00b8da806a01 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -269,8 +269,9 @@ public void testRegister() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint( SCMTestUtils.getConf(), serverAddress, 1000)) { SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() - .register(nodeToRegister.getProtoBufMessage(), TestUtils - .createNodeReport( + .register(nodeToRegister.getProtoBufMessage(), + nodeToRegister.getExtraDatanodeDetails().getProtoBufMessage(), + TestUtils.createNodeReport( getStorageReports(nodeToRegister.getUuid())), TestUtils.getRandomContainerReports(10), TestUtils.getRandomPipelineReports()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index bd022c4f1da2..ad3b329ba605 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -121,10 +121,10 @@ public Response getDatanodes() { .withPipelines(pipelines) .withLeaderCount(leaderCount.get()) .withUUid(datanode.getUuidString()) - .withVersion(datanode.getVersion()) - .withSetupTime(datanode.getSetupTime()) - .withRevision(datanode.getRevision()) - .withBuildDate(datanode.getBuildDate()) + .withVersion(datanode.getExtraDatanodeDetails().getVersion()) + .withSetupTime(datanode.getExtraDatanodeDetails().getSetupTime()) + .withRevision(datanode.getExtraDatanodeDetails().getRevision()) + .withBuildDate(datanode.getExtraDatanodeDetails().getBuildDate()) .build()); }); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 79cf1bbf9f6e..48f30cc7fbae 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -21,6 +21,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -128,6 +129,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private long containerId = 1L; private ContainerReportsProto containerReportsProto; private DatanodeDetailsProto datanodeDetailsProto; + private HddsProtos.ExtraDatanodeDetailsProto extraDatanodeDetailsProto; private Pipeline pipeline; private FileCountBySizeDao fileCountBySizeDao; private DSLContext dslContext; @@ -269,6 +271,13 @@ public void setUp() throws Exception { .setUuid(datanodeId) .setIpAddress(ip1) .build(); + extraDatanodeDetailsProto = + HddsProtos.ExtraDatanodeDetailsProto.newBuilder() + .setVersion("0.6.0") + .setSetupTime(1596347628802l) + .setBuildDate("2020-08-01T08:50Z") + .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") + .build(); StorageReportProto storageReportProto1 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK) .setStorageLocation("/disk1").setScmUsed(10000).setRemaining(5400) @@ -287,11 +296,12 @@ public void setUp() throws Exception { 
.addStorageReport(storageReportProto2).build(); DatanodeDetailsProto datanodeDetailsProto2 = + DatanodeDetailsProto.newBuilder() - .setHostName(host2) - .setUuid(datanodeId2) - .setIpAddress(ip2) - .build(); + .setHostName(host2) + .setUuid(datanodeId2) + .setIpAddress(ip2) + .build(); StorageReportProto storageReportProto3 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK) .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800) @@ -311,10 +321,11 @@ public void setUp() throws Exception { try { reconScm.getDatanodeProtocolServer() - .register(datanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto); + .register(datanodeDetailsProto, extraDatanodeDetailsProto, + nodeReportProto, containerReportsProto, pipelineReportsProto); reconScm.getDatanodeProtocolServer() - .register(datanodeDetailsProto2, nodeReportProto2, + .register(datanodeDetailsProto2, extraDatanodeDetailsProto, + nodeReportProto2, ContainerReportsProto.newBuilder().build(), PipelineReportsProto.newBuilder().build()); // Process all events in the event queue @@ -364,39 +375,39 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) throws IOException { String hostname = datanodeMetadata.getHostname(); switch (hostname) { - case host1: - Assert.assertEquals(75000, - datanodeMetadata.getDatanodeStorageReport().getCapacity()); - Assert.assertEquals(15400, - datanodeMetadata.getDatanodeStorageReport().getRemaining()); - Assert.assertEquals(35000, - datanodeMetadata.getDatanodeStorageReport().getUsed()); - - Assert.assertEquals(1, datanodeMetadata.getPipelines().size()); - Assert.assertEquals(pipelineId, - datanodeMetadata.getPipelines().get(0).getPipelineID().toString()); - Assert.assertEquals(pipeline.getFactor().getNumber(), - datanodeMetadata.getPipelines().get(0).getReplicationFactor()); - Assert.assertEquals(pipeline.getType().toString(), - datanodeMetadata.getPipelines().get(0).getReplicationType()); - Assert.assertEquals(pipeline.getLeaderNode().getHostName(), - datanodeMetadata.getPipelines().get(0).getLeaderNode()); - Assert.assertEquals(1, datanodeMetadata.getLeaderCount()); - break; - case host2: - Assert.assertEquals(130000, - datanodeMetadata.getDatanodeStorageReport().getCapacity()); - Assert.assertEquals(17800, - datanodeMetadata.getDatanodeStorageReport().getRemaining()); - Assert.assertEquals(80000, - datanodeMetadata.getDatanodeStorageReport().getUsed()); - - Assert.assertEquals(0, datanodeMetadata.getPipelines().size()); - Assert.assertEquals(0, datanodeMetadata.getLeaderCount()); - break; - default: - Assert.fail(String.format("Datanode %s not registered", - hostname)); + case host1: + Assert.assertEquals(75000, + datanodeMetadata.getDatanodeStorageReport().getCapacity()); + Assert.assertEquals(15400, + datanodeMetadata.getDatanodeStorageReport().getRemaining()); + Assert.assertEquals(35000, + datanodeMetadata.getDatanodeStorageReport().getUsed()); + + Assert.assertEquals(1, datanodeMetadata.getPipelines().size()); + Assert.assertEquals(pipelineId, + datanodeMetadata.getPipelines().get(0).getPipelineID().toString()); + Assert.assertEquals(pipeline.getFactor().getNumber(), + datanodeMetadata.getPipelines().get(0).getReplicationFactor()); + Assert.assertEquals(pipeline.getType().toString(), + datanodeMetadata.getPipelines().get(0).getReplicationType()); + Assert.assertEquals(pipeline.getLeaderNode().getHostName(), + datanodeMetadata.getPipelines().get(0).getLeaderNode()); + Assert.assertEquals(1, datanodeMetadata.getLeaderCount()); + break; 
+ case host2: + Assert.assertEquals(130000, + datanodeMetadata.getDatanodeStorageReport().getCapacity()); + Assert.assertEquals(17800, + datanodeMetadata.getDatanodeStorageReport().getRemaining()); + Assert.assertEquals(80000, + datanodeMetadata.getDatanodeStorageReport().getUsed()); + + Assert.assertEquals(0, datanodeMetadata.getPipelines().size()); + Assert.assertEquals(0, datanodeMetadata.getLeaderCount()); + break; + default: + Assert.fail(String.format("Datanode %s not registered", + hostname)); } } From ff7b46c0bc1d40e4cd279c4c69b9e9132751fc2f Mon Sep 17 00:00:00 2001 From: HuangTao Date: Sat, 8 Aug 2020 01:17:52 +0800 Subject: [PATCH 2/7] HDDS-4039. refactor --- .../hadoop/hdds/protocol/DatanodeDetails.java | 480 ++++++++---------- .../hdds/protocol/MockDatanodeDetails.java | 17 +- .../hadoop/ozone/HddsDatanodeService.java | 13 +- .../states/endpoint/RegisterEndpointTask.java | 3 +- .../StorageContainerDatanodeProtocol.java | 9 +- .../StorageContainerNodeProtocol.java | 2 +- ...atanodeProtocolClientSideTranslatorPB.java | 11 +- ...atanodeProtocolServerSideTranslatorPB.java | 3 +- .../ozone/container/common/ScmTestMock.java | 10 +- .../src/main/proto/hdds.proto | 15 +- .../src/main/resources/proto.lock | 13 +- .../ScmServerDatanodeHeartbeatProtocol.proto | 14 +- .../src/main/resources/proto.lock | 13 +- .../scm/server/SCMDatanodeProtocolServer.java | 9 +- .../ozone/container/common/TestEndPoint.java | 5 +- .../hadoop/ozone/recon/api/NodeEndpoint.java | 8 +- .../recon/codec/DatanodeDetailsCodec.java | 5 +- .../hadoop/ozone/recon/api/TestEndpoints.java | 102 ++-- 18 files changed, 320 insertions(+), 412 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index b62ae27c98ff..a4d24e4a2fe8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -43,7 +43,7 @@ @InterfaceStability.Evolving public class DatanodeDetails extends NodeImpl implements Comparable { -/** + /** * DataNode's unique identifier in the cluster. */ private final UUID uuid; @@ -52,7 +52,10 @@ public class DatanodeDetails extends NodeImpl implements private String hostName; private List ports; private String certSerialId; - private ExtraDatanodeDetails extraDatanodeDetails; + private String version; + private long setupTime; + private String revision; + private String buildDate; /** * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used @@ -63,15 +66,25 @@ public class DatanodeDetails extends NodeImpl implements * @param networkLocation DataNode's network location path * @param ports Ports used by the DataNode * @param certSerialId serial id from SCM issued certificate. 
+ * @param version DataNode's version + * @param setupTime the setup time of DataNode + * @param revision DataNodes's revision + * @param buildDate DataNodes's build timestamp */ - protected DatanodeDetails(UUID uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId) { + @SuppressWarnings("parameternumber") + private DatanodeDetails(UUID uuid, String ipAddress, String hostName, + String networkLocation, List ports, String certSerialId, + String version, long setupTime, String revision, String buildDate) { super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); this.uuid = uuid; this.ipAddress = ipAddress; this.hostName = hostName; this.ports = ports; this.certSerialId = certSerialId; + this.version = version; + this.setupTime = setupTime; + this.revision = revision; + this.buildDate = buildDate; } public DatanodeDetails(DatanodeDetails datanodeDetails) { @@ -83,9 +96,10 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.ports = datanodeDetails.ports; this.setNetworkName(datanodeDetails.getNetworkName()); this.setParent(datanodeDetails.getParent()); - if (datanodeDetails.getExtraDatanodeDetails() != null) { - this.extraDatanodeDetails = datanodeDetails.getExtraDatanodeDetails(); - } + this.version = datanodeDetails.version; + this.setupTime = datanodeDetails.setupTime; + this.revision = datanodeDetails.revision; + this.buildDate = datanodeDetails.buildDate; } /** @@ -179,25 +193,6 @@ public Port getPort(Port.Name name) { return null; } - /** - * Returns DataNode extended fields. - * - * @return ExtraDatanodeDetails - */ - public ExtraDatanodeDetails getExtraDatanodeDetails() { - return extraDatanodeDetails; - } - - /** - * Sets a DataNode extended fields. - * - * @param extraDatanodeDetails DataNode extended fields - */ - public void setExtraDatanodeDetails( - ExtraDatanodeDetails extraDatanodeDetails) { - this.extraDatanodeDetails = extraDatanodeDetails; - } - /** * Returns a DatanodeDetails from the protocol buffers. * @@ -236,6 +231,36 @@ public static DatanodeDetails getFromProtoBuf( return builder.build(); } + + /** + * Returns a ExtendedDatanodeDetails from the protocol buffers. + * + * @param extendedDetailsProto - protoBuf Message + * @return DatanodeDetails + */ + public static DatanodeDetails getFromProtoBuf( + HddsProtos.ExtendedDatanodeDetailsProto extendedDetailsProto) { + DatanodeDetails.Builder builder = newBuilder(); + if (extendedDetailsProto.hasDatanodeDetails()) { + DatanodeDetails datanodeDetails = getFromProtoBuf( + extendedDetailsProto.getDatanodeDetails()); + builder.setDatanodeDetails(datanodeDetails); + } + if (extendedDetailsProto.hasVersion()) { + builder.setVersion(extendedDetailsProto.getVersion()); + } + if (extendedDetailsProto.hasSetupTime()) { + builder.setSetupTime(extendedDetailsProto.getSetupTime()); + } + if (extendedDetailsProto.hasRevision()) { + builder.setRevision(extendedDetailsProto.getRevision()); + } + if (extendedDetailsProto.hasBuildDate()) { + builder.setBuildDate(extendedDetailsProto.getBuildDate()); + } + return builder.build(); + } + /** * Returns a DatanodeDetails protobuf message from a datanode ID. * @return HddsProtos.DatanodeDetailsProto @@ -278,6 +303,33 @@ public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { return builder.build(); } + /** + * Returns a ExtendedDatanodeDetails protobuf message from a datanode ID. 
+ * @return HddsProtos.ExtendedDatanodeDetailsProto + */ + public HddsProtos.ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { + HddsProtos.DatanodeDetailsProto datanodeDetailsProto = getProtoBufMessage(); + + HddsProtos.ExtendedDatanodeDetailsProto.Builder extendedBuilder = + HddsProtos.ExtendedDatanodeDetailsProto.newBuilder() + .setDatanodeDetails(datanodeDetailsProto); + + if (!Strings.isNullOrEmpty(getVersion())) { + extendedBuilder.setVersion(getVersion()); + } + + extendedBuilder.setSetupTime(getSetupTime()); + + if (!Strings.isNullOrEmpty(getRevision())) { + extendedBuilder.setRevision(getRevision()); + } + if (!Strings.isNullOrEmpty(getBuildDate())) { + extendedBuilder.setBuildDate(getBuildDate()); + } + + return extendedBuilder.build(); + } + @Override public String toString() { return uuid.toString() + "{" + @@ -327,16 +379,40 @@ public static final class Builder { private String networkLocation; private List ports; private String certSerialId; - private ExtraDatanodeDetails extraDatanodeDetails; + private String version; + private long setupTime; + private String revision; + private String buildDate; /** * Default private constructor. To create Builder instance use * DatanodeDetails#newBuilder. */ - protected Builder() { + private Builder() { ports = new ArrayList<>(); } + /** + * Initialize with DatanodeDetails. + * + * @param details DatanodeDetails + * @return DatanodeDetails.Builder + */ + public Builder setDatanodeDetails(DatanodeDetails details) { + this.id = details.getUuid(); + this.ipAddress = details.getIpAddress(); + this.hostName = details.getHostName(); + this.networkName = details.getNetworkName(); + this.networkLocation = details.getNetworkLocation(); + this.ports = details.getPorts(); + this.certSerialId = details.getCertSerialId(); + this.version = details.getVersion(); + this.setupTime = details.getSetupTime(); + this.revision = details.getRevision(); + this.buildDate = details.getBuildDate(); + return this; + } + /** * Sets the DatanodeUuid. * @@ -417,15 +493,50 @@ public Builder setCertSerialId(String certId) { } /** - * Adds extra DataNode info. + * Sets the DataNode version. + * + * @param ver the version of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setVersion(String ver) { + this.version = ver; + return this; + } + + /** + * Sets the DataNode revision. + * + * @param rev the revision of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setRevision(String rev) { + this.revision = rev; + return this; + } + + /** + * Sets the DataNode build date. + * + * @param date the build date of DataNode. + * + * @return DatanodeDetails.Builder + */ + public Builder setBuildDate(String date) { + this.buildDate = date; + return this; + } + + /** + * Sets the DataNode setup time. * - * @param extraDatanodeDetails extra DataNode info. + * @param time the setup time of DataNode. 
* * @return DatanodeDetails.Builder */ - public Builder setExtraDatanodeDetails( - ExtraDatanodeDetails extraDatanodeDetails) { - this.extraDatanodeDetails = extraDatanodeDetails; + public Builder setSetupTime(long time) { + this.setupTime = time; return this; } @@ -440,13 +551,11 @@ public DatanodeDetails build() { networkLocation = NetConstants.DEFAULT_RACK; } DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId); + networkLocation, ports, certSerialId, + version, setupTime, revision, buildDate); if (networkName != null) { dn.setNetworkName(networkName); } - if (extraDatanodeDetails != null) { - dn.setExtraDatanodeDetails(extraDatanodeDetails); - } return dn; } } @@ -519,7 +628,7 @@ public int hashCode() { * @param anObject * The object to compare this {@code Port} against * @return {@code true} if the given object represents a {@code Port} - and has the same name, {@code false} otherwise + and has the same name, {@code false} otherwise */ @Override public boolean equals(Object anObject) { @@ -551,251 +660,74 @@ public void setCertSerialId(String certSerialId) { } /** - * Constructs ExtraDatanodeDetails instance. - * - * @param version DataNode's version - * @param setupTime the setup time of DataNode - * @param revision DataNodes's revision - * @param buildDate DataNodes's build timestamp + * Returns the DataNode version. * - * @return ExtraDatanodeDetails instance + * @return DataNode version */ - public static ExtraDatanodeDetails newExtraDatanodeDetails( - String version, long setupTime, String revision, String buildDate) { - return new ExtraDatanodeDetails(version, setupTime, revision, buildDate); + public String getVersion() { + return version; } - public static final class ExtraDatanodeDetails { - private String version; - private long setupTime; - private String revision; - private String buildDate; - - /** - * Constructs ExtraDatanodeDetails instance. - * @param version DataNode's version - * @param setupTime the setup time of DataNode - * @param revision DataNodes's revision - * @param buildDate DataNodes's build timestamp - */ - public ExtraDatanodeDetails(String version, long setupTime, - String revision, String buildDate) { - this.version = version; - this.setupTime = setupTime; - this.revision = revision; - this.buildDate = buildDate; - } - - public ExtraDatanodeDetails(ExtraDatanodeDetails - ExtraDatanodeDetails) { - this.version = ExtraDatanodeDetails.version; - this.setupTime = ExtraDatanodeDetails.setupTime; - this.revision = ExtraDatanodeDetails.revision; - this.buildDate = ExtraDatanodeDetails.buildDate; - } - - /** - * Returns the DataNode version. - * - * @return DataNode version - */ - public String getVersion() { - return version; - } - - /** - * Set DataNode version. - * - * @param version DataNode version - */ - public void setVersion(String version) { - this.version = version; - } - - /** - * Returns the DataNode setup time. - * - * @return DataNode setup time - */ - public long getSetupTime() { - return setupTime; - } - - /** - * Set DataNode setup time. - * - * @param setupTime DataNode setup time - */ - public void setSetupTime(long setupTime) { - this.setupTime = setupTime; - } - - /** - * Returns the DataNode revision. - * - * @return DataNode revision - */ - public String getRevision() { - return revision; - } - - /** - * Set DataNode revision. - * - * @param rev DataNode revision - */ - public void setRevision(String rev) { - this.revision = rev; - } - - /** - * Returns the DataNode build date. 
- * - * @return DataNode build date - */ - public String getBuildDate() { - return buildDate; - } - - /** - * Set DataNode build date. - * - * @param date DataNode build date - */ - public void setBuildDate(String date) { - this.buildDate = date; - } - - /** - * Returns a ExtraDatanodeDetails from the protocol buffers. - * - * @param extraDatanodeDetailsProto - protoBuf Message - * @return ExtraDatanodeDetails - */ - public static ExtraDatanodeDetails getFromProtoBuf( - HddsProtos.ExtraDatanodeDetailsProto extraDatanodeDetailsProto) { - Builder builder = newBuilder(); - if (extraDatanodeDetailsProto.hasVersion()) { - builder.setVersion(extraDatanodeDetailsProto.getVersion()); - } - if (extraDatanodeDetailsProto.hasSetupTime()) { - builder.setSetupTime(extraDatanodeDetailsProto.getSetupTime()); - } - if (extraDatanodeDetailsProto.hasRevision()) { - builder.setRevision(extraDatanodeDetailsProto.getRevision()); - } - if (extraDatanodeDetailsProto.hasBuildDate()) { - builder.setBuildDate(extraDatanodeDetailsProto.getBuildDate()); - } - return builder.build(); - } - - /** - * Returns a ExtraDatanodeDetailsProto protobuf message from a datanode ID. - * @return HddsProtos.ExtraDatanodeDetailsProto - */ - public HddsProtos.ExtraDatanodeDetailsProto getProtoBufMessage() { - HddsProtos.ExtraDatanodeDetailsProto.Builder builder = - HddsProtos.ExtraDatanodeDetailsProto.newBuilder(); - if (!Strings.isNullOrEmpty(getVersion())) { - builder.setVersion(getVersion()); - } - - builder.setSetupTime(getSetupTime()); - - if (!Strings.isNullOrEmpty(getRevision())) { - builder.setRevision(getRevision()); - } - if (!Strings.isNullOrEmpty(getBuildDate())) { - builder.setBuildDate(getBuildDate()); - } - - return builder.build(); - } - - /** - * Returns ExtraDatanodeDetails.Builder instance. - * - * @return ExtraDatanodeDetails.Builder - */ - public static Builder newBuilder() { - return new Builder(); - } + /** + * Set DataNode version. + * + * @param version DataNode version + */ + public void setVersion(String version) { + this.version = version; + } - /** - * Builder class for building DatanodeDetails. - */ - public static final class Builder { - private String version; - private long setupTime; - private String revision; - private String buildDate; - - /** - * Default private constructor. To create Builder instance use - * ExtraDatanodeDetails#newBuilder. - */ - private Builder() { } - - /** - * Sets the DataNode version. - * - * @param ver the version of DataNode. - * - * @return ExtraDatanodeDetails.Builder - */ - public Builder setVersion(String ver) { - this.version = ver; - return this; - } + /** + * Returns the DataNode setup time. + * + * @return DataNode setup time + */ + public long getSetupTime() { + return setupTime; + } - /** - * Sets the DataNode revision. - * - * @param rev the revision of DataNode. - * - * @return ExtraDatanodeDetails.Builder - */ - public Builder setRevision(String rev) { - this.revision = rev; - return this; - } + /** + * Set DataNode setup time. + * + * @param setupTime DataNode setup time + */ + public void setSetupTime(long setupTime) { + this.setupTime = setupTime; + } - /** - * Sets the DataNode build date. - * - * @param date the build date of DataNode. - * - * @return ExtraDatanodeDetails.Builder - */ - public Builder setBuildDate(String date) { - this.buildDate = date; - return this; - } + /** + * Returns the DataNode revision. + * + * @return DataNode revision + */ + public String getRevision() { + return revision; + } - /** - * Sets the DataNode setup time. 
- * - * @param time the setup time of DataNode. - * - * @return ExtraDatanodeDetails.Builder - */ - public Builder setSetupTime(long time) { - this.setupTime = time; - return this; - } + /** + * Set DataNode revision. + * + * @param rev DataNode revision + */ + public void setRevision(String rev) { + this.revision = rev; + } - /** - * Builds and returns DatanodeDetails instance. - * - * @return ExtraDatanodeDetails - */ - public ExtraDatanodeDetails build() { - ExtraDatanodeDetails dn = new ExtraDatanodeDetails( - version, setupTime, revision, buildDate); - return dn; - } - } + /** + * Returns the DataNode build date. + * + * @return DataNode build date + */ + public String getBuildDate() { + return buildDate; } + /** + * Set DataNode build date. + * + * @param date DataNode build date + */ + public void setBuildDate(String date) { + this.buildDate = date; + } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java index 27c85eea9d26..06a1bf0f8678 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/MockDatanodeDetails.java @@ -22,8 +22,6 @@ import java.util.Random; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.ExtraDatanodeDetails; -import org.apache.hadoop.util.Time; /** * Provides {@link DatanodeDetails} factory methods for testing. @@ -46,7 +44,7 @@ public static DatanodeDetails randomDatanodeDetails() { * @return DatanodeDetails */ public static DatanodeDetails createDatanodeDetails(String hostname, - String loc) { + String loc) { Random random = ThreadLocalRandom.current(); String ipAddress = random.nextInt(256) + "." 
+ random.nextInt(256) @@ -89,18 +87,6 @@ public static DatanodeDetails createDatanodeDetails(String uuid, public static DatanodeDetails createDatanodeDetails(String uuid, String hostname, String ipAddress, String networkLocation, int port) { - ExtraDatanodeDetails extraDatanodeDetails = - DatanodeDetails.newExtraDatanodeDetails( - "0.6.0", Time.now(), - "1346f493fa1690358add7bb9f3e5b52545993f36", - "2020-08-01T16:19Z"); - return createDatanodeDetails(uuid, hostname, ipAddress, - networkLocation, 0, extraDatanodeDetails); - } - - public static DatanodeDetails createDatanodeDetails(String uuid, - String hostname, String ipAddress, String networkLocation, int port, - ExtraDatanodeDetails extraDatanodeDetails) { DatanodeDetails.Port containerPort = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, port); DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( @@ -115,7 +101,6 @@ public static DatanodeDetails createDatanodeDetails(String uuid, .addPort(ratisPort) .addPort(restPort) .setNetworkLocation(networkLocation) - .setExtraDatanodeDetails(extraDatanodeDetails) .build(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 9170ff44a23f..d2674504b9f0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.ExtraDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.utils.HddsServerUtil; @@ -207,15 +206,15 @@ public void start() { datanodeDetails = initializeDatanodeDetails(); datanodeDetails.setHostName(hostname); datanodeDetails.setIpAddress(ip); - ExtraDatanodeDetails extraDatanodeDetails = new ExtraDatanodeDetails( - HddsVersionInfo.HDDS_VERSION_INFO.getVersion(), - Time.now(), - HddsVersionInfo.HDDS_VERSION_INFO.getRevision(), - HddsVersionInfo.HDDS_VERSION_INFO.getDate()); + datanodeDetails.setVersion( + HddsVersionInfo.HDDS_VERSION_INFO.getVersion()); + datanodeDetails.setSetupTime(Time.now()); + datanodeDetails.setRevision( + HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); + datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); TracingUtil.initTracing( "HddsDatanodeService." 
+ datanodeDetails.getUuidString() .substring(0, 8), conf); - datanodeDetails.setExtraDatanodeDetails(extraDatanodeDetails); LOG.info("HddsDatanodeService host:{} ip:{}", hostname, ip); // Authenticate Hdds Datanode service if security is enabled if (OzoneSecurityUtil.isSecurityEnabled(conf)) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 3ec508c9ca9a..60d2bb23f22a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -119,8 +119,7 @@ public EndpointStateMachine.EndPointStates call() throws Exception { datanodeContainerManager.getPipelineReport(); // TODO : Add responses to the command Queue. SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint() - .register(datanodeDetails.getProtoBufMessage(), - datanodeDetails.getExtraDatanodeDetails().getProtoBufMessage(), + .register(datanodeDetails.getExtendedProtoBufMessage(), nodeReport, containerReport, pipelineReportsProto); Preconditions.checkState(UUID.fromString(response.getDatanodeUUID()) .equals(datanodeDetails.getUuid()), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java index 6e99c3a6bbe1..d9f998f15aca 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java @@ -17,8 +17,7 @@ package org.apache.hadoop.ozone.protocol; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtraDatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto @@ -74,15 +73,13 @@ SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat) /** * Register Datanode. - * @param datanodeDetails - Datanode Details. - * @param extraDatanodeDetails - Datanode more details. + * @param extendedDatanodeDetailsProto - extended Datanode Details. * @param nodeReport - Node Report. * @param containerReportsRequestProto - Container Reports. * @return SCM Command. 
*/ SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetails, - ExtraDatanodeDetailsProto extraDatanodeDetails, + ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto, NodeReportProto nodeReport, ContainerReportsProto containerReportsRequestProto, PipelineReportsProto pipelineReports) throws IOException; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java index 718f12af83b6..cb55880c5c96 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java @@ -51,7 +51,7 @@ public interface StorageContainerNodeProtocol { /** * Register the node if the node finds that it is not registered with any SCM. - * @param datanodeDetails ExtendedDatanodeDetails + * @param datanodeDetails DatanodeDetails * @param nodeReport NodeReportProto * @param pipelineReport PipelineReportsProto * @return SCMRegisteredResponseProto diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index 63a69cd7e170..cf624628c22e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -20,7 +20,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ExtraDatanodeDetailsProto; + .ExtendedDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto @@ -155,24 +155,21 @@ public SCMHeartbeatResponseProto sendHeartbeat( /** * Register Datanode. * - * @param datanodeDetailsProto - Datanode Details - * @param extraDatanodeDetails - Datanode more details. + * @param extendedDatanodeDetailsProto - extended Datanode Details * @param nodeReport - Node Report. * @param containerReportsRequestProto - Container Reports. * @return SCM Command. 
 */
 @Override
 public SCMRegisteredResponseProto register(
- DatanodeDetailsProto datanodeDetailsProto,
- ExtraDatanodeDetailsProto extraDatanodeDetails,
+ ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
 NodeReportProto nodeReport,
 ContainerReportsProto containerReportsRequestProto,
 PipelineReportsProto pipelineReportsProto)
 throws IOException {
 SCMRegisterRequestProto.Builder req =
 SCMRegisterRequestProto.newBuilder();
- req.setDatanodeDetails(datanodeDetailsProto);
- req.setExtraDatanodeDetails(extraDatanodeDetails);
+ req.setExtendedDatanodeDetails(extendedDatanodeDetailsProto);
 req.setContainerReport(containerReportsRequestProto);
 req.setPipelineReports(pipelineReportsProto);
 req.setNodeReport(nodeReport);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 1d2d063eccd3..f27f4f3cf38c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -68,8 +68,7 @@ public SCMRegisteredResponseProto register(
 .getContainerReport();
 NodeReportProto dnNodeReport = request.getNodeReport();
 PipelineReportsProto pipelineReport = request.getPipelineReports();
- return impl.register(request.getDatanodeDetails(),
- request.getExtraDatanodeDetails(), dnNodeReport,
+ return impl.register(request.getExtendedDatanodeDetails(), dnNodeReport,
 containerRequestProto, pipelineReport);
 }

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 902677857a93..534f9efdabaa 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -17,7 +17,8 @@
 package org.apache.hadoop.ozone.container.common;

 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtraDatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos
+ .ExtendedDatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
 .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -212,19 +213,20 @@ private void sleepIfNeeded() {
 /**
 * Register Datanode.
 *
- * @param datanodeDetailsProto DatanodDetailsProto.
+ * @param extendedDatanodeDetailsProto ExtendedDatanodeDetailsProto.
 * @return SCM Command.
 */
 @Override
 public StorageContainerDatanodeProtocolProtos
 .SCMRegisteredResponseProto register(
- DatanodeDetailsProto datanodeDetailsProto,
- ExtraDatanodeDetailsProto extraDatanodeDetailsProto,
+ ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
 NodeReportProto nodeReport,
 ContainerReportsProto containerReportsRequestProto,
 PipelineReportsProto pipelineReportsProto) throws IOException {
 rpcCount.incrementAndGet();
+ DatanodeDetailsProto datanodeDetailsProto =
+ extendedDatanodeDetailsProto.getDatanodeDetails();
 updateNodeReport(datanodeDetailsProto, nodeReport);
 updateContainerReport(containerReportsRequestProto, datanodeDetailsProto);
 sleepIfNeeded();
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 904211788332..0c9b26142558 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -49,14 +49,15 @@ message DatanodeDetailsProto {

 /**
 The DatanodeDetailsProto is a basic type that will be shared by many Proto,
- and we just need some more detail information to register with SCM, so I use
- ExtraDatanodeDetailsProto to extend DatanodeDetailsProto for registration.
+ to reduce the number of fields transferred by ProtoBuf, we only need to extend
+ DatanodeDetailsProto when registering a DataNode with SCM and Recon.
 */
-message ExtraDatanodeDetailsProto {
- optional string version = 1; // Datanode version
- optional int64 setupTime = 2;
- optional string revision = 3;
- optional string buildDate = 4;
+message ExtendedDatanodeDetailsProto {
+ required DatanodeDetailsProto datanodeDetails = 1;
+ optional string version = 2;
+ optional int64 setupTime = 3;
+ optional string revision = 4;
+ optional string buildDate = 5;
 }

 /**
diff --git a/hadoop-hdds/interface-client/src/main/resources/proto.lock b/hadoop-hdds/interface-client/src/main/resources/proto.lock
index 34f6d0ce213a..581ffafc4ef6 100644
--- a/hadoop-hdds/interface-client/src/main/resources/proto.lock
+++ b/hadoop-hdds/interface-client/src/main/resources/proto.lock
@@ -1538,25 +1538,30 @@
 ]
 },
 {
- "name": "ExtraDatanodeDetailsProto",
+ "name": "ExtendedDatanodeDetailsProto",
 "fields": [
 {
 "id": 1,
+ "name": "datanodeDetails",
+ "type": "DatanodeDetailsProto"
+ },
+ {
+ "id": 2,
 "name": "version",
 "type": "string"
 },
 {
- "id": 2,
+ "id": 3,
 "name": "setupTime",
 "type": "int64"
 },
 {
- "id": 3,
+ "id": 4,
 "name": "revision",
 "type": "string"
 },
 {
- "id": 4,
+ "id": 5,
 "name": "buildDate",
 "type": "string"
 }
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index d0f2dd412d1f..1dc4bcd4d249 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -88,16 +88,10 @@ message SCMVersionResponseProto {
 }

 message SCMRegisterRequestProto {
- required DatanodeDetailsProto datanodeDetails = 1;
- required ExtraDatanodeDetailsProto extraDatanodeDetails = 2;
- required NodeReportProto nodeReport = 3;
- required ContainerReportsProto containerReport = 4;
- required PipelineReportsProto pipelineReports = 5;
-}
-
-message ExtendedDatanodeDetailsProto {
- required DatanodeDetailsProto datanodeDetails = 1;
- required ExtraDatanodeDetailsProto extraDatanodeDetails = 2;
+ required ExtendedDatanodeDetailsProto extendedDatanodeDetails = 1;
+
required NodeReportProto nodeReport = 2; + required ContainerReportsProto containerReport = 3; + required PipelineReportsProto pipelineReports = 4; } /** diff --git a/hadoop-hdds/interface-server/src/main/resources/proto.lock b/hadoop-hdds/interface-server/src/main/resources/proto.lock index 9e9ea78d495c..5022d48be971 100644 --- a/hadoop-hdds/interface-server/src/main/resources/proto.lock +++ b/hadoop-hdds/interface-server/src/main/resources/proto.lock @@ -310,26 +310,21 @@ "fields": [ { "id": 1, - "name": "datanodeDetails", - "type": "DatanodeDetailsProto" + "name": "extendedDatanodeDetails", + "type": "ExtendedDatanodeDetailsProto" }, { "id": 2, - "name": "extraDatanodeDetails", - "type": "ExtraDatanodeDetailsProto" - }, - { - "id": 3, "name": "nodeReport", "type": "NodeReportProto" }, { - "id": 4, + "id": 3, "name": "containerReport", "type": "ContainerReportsProto" }, { - "id": 5, + "id": 4, "name": "pipelineReports", "type": "PipelineReportsProto" } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 8c0d601e87ac..a2953415cb38 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -31,7 +31,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.ExtraDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; @@ -204,17 +203,13 @@ public SCMVersionResponseProto getVersion(SCMVersionRequestProto @Override public SCMRegisteredResponseProto register( - HddsProtos.DatanodeDetailsProto datanodeDetailsProto, - HddsProtos.ExtraDatanodeDetailsProto extraDatanodeDetailsProto, + HddsProtos.ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto, NodeReportProto nodeReport, ContainerReportsProto containerReportsProto, PipelineReportsProto pipelineReportsProto) throws IOException { DatanodeDetails datanodeDetails = DatanodeDetails - .getFromProtoBuf(datanodeDetailsProto); - ExtraDatanodeDetails extraDatanodeDetails =ExtraDatanodeDetails - .getFromProtoBuf(extraDatanodeDetailsProto); - datanodeDetails.setExtraDatanodeDetails(extraDatanodeDetails); + .getFromProtoBuf(extendedDatanodeDetailsProto); boolean auditSuccess = true; Map auditMap = Maps.newHashMap(); auditMap.put("datanodeDetails", datanodeDetails.toString()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 00b8da806a01..8cad8b0d4561 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -269,9 +269,8 @@ public void testRegister() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint( SCMTestUtils.getConf(), serverAddress, 1000)) { SCMRegisteredResponseProto responseProto = 
rpcEndPoint.getEndPoint() - .register(nodeToRegister.getProtoBufMessage(), - nodeToRegister.getExtraDatanodeDetails().getProtoBufMessage(), - TestUtils.createNodeReport( + .register(nodeToRegister.getExtendedProtoBufMessage(), TestUtils + .createNodeReport( getStorageReports(nodeToRegister.getUuid())), TestUtils.getRandomContainerReports(10), TestUtils.getRandomPipelineReports()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java index ad3b329ba605..bd022c4f1da2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java @@ -121,10 +121,10 @@ public Response getDatanodes() { .withPipelines(pipelines) .withLeaderCount(leaderCount.get()) .withUUid(datanode.getUuidString()) - .withVersion(datanode.getExtraDatanodeDetails().getVersion()) - .withSetupTime(datanode.getExtraDatanodeDetails().getSetupTime()) - .withRevision(datanode.getExtraDatanodeDetails().getRevision()) - .withBuildDate(datanode.getExtraDatanodeDetails().getBuildDate()) + .withVersion(datanode.getVersion()) + .withSetupTime(datanode.getSetupTime()) + .withRevision(datanode.getRevision()) + .withBuildDate(datanode.getBuildDate()) .build()); }); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java index c11ebbf63a63..96ae806c8737 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/DatanodeDetailsCodec.java @@ -19,7 +19,8 @@ package org.apache.hadoop.ozone.recon.codec; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto.PARSER; +import static org.apache.hadoop.hdds.protocol.proto + .HddsProtos.ExtendedDatanodeDetailsProto.PARSER; import java.io.IOException; @@ -33,7 +34,7 @@ public class DatanodeDetailsCodec implements Codec { @Override public byte[] toPersistedFormat(DatanodeDetails object) throws IOException { - return object.getProtoBufMessage().toByteArray(); + return object.getExtendedProtoBufMessage().toByteArray(); } @Override diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 48f30cc7fbae..5d399e9abe7d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -21,8 +21,9 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos + .ExtendedDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID; @@ -128,8 +129,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private DatanodeDetails datanodeDetails2; private long containerId = 1L; private 
ContainerReportsProto containerReportsProto; - private DatanodeDetailsProto datanodeDetailsProto; - private HddsProtos.ExtraDatanodeDetailsProto extraDatanodeDetailsProto; + private ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto; private Pipeline pipeline; private FileCountBySizeDao fileCountBySizeDao; private DSLContext dslContext; @@ -265,14 +265,15 @@ public void setUp() throws Exception { PipelineReportsProto pipelineReportsProto = PipelineReportsProto.newBuilder() .addPipelineReport(pipelineReport).build(); - datanodeDetailsProto = + DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder() .setHostName(host1) .setUuid(datanodeId) .setIpAddress(ip1) .build(); - extraDatanodeDetailsProto = - HddsProtos.ExtraDatanodeDetailsProto.newBuilder() + extendedDatanodeDetailsProto = + HddsProtos.ExtendedDatanodeDetailsProto.newBuilder() + .setDatanodeDetails(datanodeDetailsProto) .setVersion("0.6.0") .setSetupTime(1596347628802l) .setBuildDate("2020-08-01T08:50Z") @@ -296,11 +297,18 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto2).build(); DatanodeDetailsProto datanodeDetailsProto2 = - DatanodeDetailsProto.newBuilder() .setHostName(host2) - .setUuid(datanodeId2) - .setIpAddress(ip2) + .setUuid(datanodeId) + .setIpAddress(ip1) + .build(); + ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 = + ExtendedDatanodeDetailsProto.newBuilder() + .setDatanodeDetails(datanodeDetailsProto) + .setVersion("0.6.0") + .setSetupTime(1596347636802l) + .setBuildDate("2020-08-01T08:50Z") + .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); StorageReportProto storageReportProto3 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK) @@ -321,11 +329,10 @@ public void setUp() throws Exception { try { reconScm.getDatanodeProtocolServer() - .register(datanodeDetailsProto, extraDatanodeDetailsProto, - nodeReportProto, containerReportsProto, pipelineReportsProto); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto); reconScm.getDatanodeProtocolServer() - .register(datanodeDetailsProto2, extraDatanodeDetailsProto, - nodeReportProto2, + .register(extendedDatanodeDetailsProto2, nodeReportProto2, ContainerReportsProto.newBuilder().build(), PipelineReportsProto.newBuilder().build()); // Process all events in the event queue @@ -375,39 +382,39 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) throws IOException { String hostname = datanodeMetadata.getHostname(); switch (hostname) { - case host1: - Assert.assertEquals(75000, - datanodeMetadata.getDatanodeStorageReport().getCapacity()); - Assert.assertEquals(15400, - datanodeMetadata.getDatanodeStorageReport().getRemaining()); - Assert.assertEquals(35000, - datanodeMetadata.getDatanodeStorageReport().getUsed()); - - Assert.assertEquals(1, datanodeMetadata.getPipelines().size()); - Assert.assertEquals(pipelineId, - datanodeMetadata.getPipelines().get(0).getPipelineID().toString()); - Assert.assertEquals(pipeline.getFactor().getNumber(), - datanodeMetadata.getPipelines().get(0).getReplicationFactor()); - Assert.assertEquals(pipeline.getType().toString(), - datanodeMetadata.getPipelines().get(0).getReplicationType()); - Assert.assertEquals(pipeline.getLeaderNode().getHostName(), - datanodeMetadata.getPipelines().get(0).getLeaderNode()); - Assert.assertEquals(1, datanodeMetadata.getLeaderCount()); - break; - case host2: - Assert.assertEquals(130000, - 
datanodeMetadata.getDatanodeStorageReport().getCapacity()); - Assert.assertEquals(17800, - datanodeMetadata.getDatanodeStorageReport().getRemaining()); - Assert.assertEquals(80000, - datanodeMetadata.getDatanodeStorageReport().getUsed()); - - Assert.assertEquals(0, datanodeMetadata.getPipelines().size()); - Assert.assertEquals(0, datanodeMetadata.getLeaderCount()); - break; - default: - Assert.fail(String.format("Datanode %s not registered", - hostname)); + case host1: + Assert.assertEquals(75000, + datanodeMetadata.getDatanodeStorageReport().getCapacity()); + Assert.assertEquals(15400, + datanodeMetadata.getDatanodeStorageReport().getRemaining()); + Assert.assertEquals(35000, + datanodeMetadata.getDatanodeStorageReport().getUsed()); + + Assert.assertEquals(1, datanodeMetadata.getPipelines().size()); + Assert.assertEquals(pipelineId, + datanodeMetadata.getPipelines().get(0).getPipelineID().toString()); + Assert.assertEquals(pipeline.getFactor().getNumber(), + datanodeMetadata.getPipelines().get(0).getReplicationFactor()); + Assert.assertEquals(pipeline.getType().toString(), + datanodeMetadata.getPipelines().get(0).getReplicationType()); + Assert.assertEquals(pipeline.getLeaderNode().getHostName(), + datanodeMetadata.getPipelines().get(0).getLeaderNode()); + Assert.assertEquals(1, datanodeMetadata.getLeaderCount()); + break; + case host2: + Assert.assertEquals(130000, + datanodeMetadata.getDatanodeStorageReport().getCapacity()); + Assert.assertEquals(17800, + datanodeMetadata.getDatanodeStorageReport().getRemaining()); + Assert.assertEquals(80000, + datanodeMetadata.getDatanodeStorageReport().getUsed()); + + Assert.assertEquals(0, datanodeMetadata.getPipelines().size()); + Assert.assertEquals(0, datanodeMetadata.getLeaderCount()); + break; + default: + Assert.fail(String.format("Datanode %s not registered", + hostname)); } } @@ -639,7 +646,8 @@ private void waitAndCheckConditionAfterHeartbeat(Callable check) SCMHeartbeatRequestProto heartbeatRequestProto = SCMHeartbeatRequestProto.newBuilder() .setContainerReport(containerReportsProto) - .setDatanodeDetails(datanodeDetailsProto) + .setDatanodeDetails(extendedDatanodeDetailsProto + .getDatanodeDetails()) .build(); reconScm.getDatanodeProtocolServer().sendHeartbeat(heartbeatRequestProto); LambdaTestUtils.await(30000, 1000, check); From d55c2645346b8511cb18c0965fe91ebbf837b9eb Mon Sep 17 00:00:00 2001 From: HuangTao Date: Sat, 8 Aug 2020 01:41:15 +0800 Subject: [PATCH 3/7] HDDS-4039. 
Revert the format of some lines

---
 .../hadoop/hdds/protocol/DatanodeDetails.java |  3 +-
 .../hadoop/ozone/HddsDatanodeService.java     |  5 +-
 .../states/datanode/RunningDatanodeState.java |  2 +-
 .../StorageContainerDatanodeProtocol.java     |  2 +-
 .../hadoop/hdds/scm/node/DatanodeInfo.java    |  2 +-
 .../hadoop/hdds/scm/node/NodeManager.java     |  4 +-
 .../hdds/scm/node/NodeStateManager.java       | 56 +++++++++----------
 .../hadoop/hdds/scm/node/SCMNodeManager.java  |  4 +-
 .../hdds/scm/node/states/NodeStateMap.java    |  4 +-
 9 files changed, 40 insertions(+), 42 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index a4d24e4a2fe8..73a138910415 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -231,7 +231,6 @@ public static DatanodeDetails getFromProtoBuf(
 return builder.build();
 }

-
 /**
 * Returns an ExtendedDatanodeDetails from the protocol buffers.
 *
@@ -627,7 +627,7 @@ public int hashCode() {
 * @param anObject
 * The object to compare this {@code Port} against
 * @return {@code true} if the given object represents a {@code Port}
- and has the same name, {@code false} otherwise
+ and has the same name, {@code false} otherwise
 */
 @Override
 public boolean equals(Object anObject) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index d2674504b9f0..cfb22e30dcd2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -202,7 +202,6 @@ public void start() {
 try {
 String hostname = HddsUtils.getHostName(conf);
 String ip = InetAddress.getByName(hostname).getHostAddress();
-
 datanodeDetails = initializeDatanodeDetails();
 datanodeDetails.setHostName(hostname);
 datanodeDetails.setIpAddress(ip);
@@ -246,8 +245,8 @@ public void start() {
 if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
 initializeCertificateClient(conf);
 }
- datanodeStateMachine = new DatanodeStateMachine(datanodeDetails,
- conf, dnCertClient, this::terminateDatanode);
+ datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf,
+ dnCertClient, this::terminateDatanode);
 try {
 httpServer = new HddsDatanodeHttpServer(conf);
 httpServer.start();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
index 45f510fd25d5..b0cfb4ce001a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
@@ -178,7 +178,7 @@ private Callable getEndPointTask(
 * @return next container state.
*/ private DatanodeStateMachine.DatanodeStates - computeNextContainerState( + computeNextContainerState( List> results) { for (Future state : results) { try { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java index d9f998f15aca..64f294388944 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java @@ -53,7 +53,7 @@ public interface StorageContainerDatanodeProtocol { /** * Version 1: Initial version. */ - long versionID = 1L; + long versionID = 1L; /** * Returns SCM version. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 5eefa4f3f6c3..b39440f41f99 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -127,7 +127,7 @@ public int getHealthyVolumeCount() { */ private int getFailedVolumeCount() { return (int) storageReports.stream(). - filter(e -> e.hasFailed() ? e.getFailed() : false).count(); + filter(e -> e.hasFailed() ? e.getFailed() : false).count(); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index f120b3b6d4c5..df21b84eafda 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -146,7 +146,7 @@ public interface NodeManager extends StorageContainerNodeProtocol, * use addDatanodeInContainerMap call. */ void addContainer(DatanodeDetails datanodeDetails, - ContainerID containerId) throws NodeNotFoundException; + ContainerID containerId) throws NodeNotFoundException; /** * Remaps datanode to containers mapping to the new set of containers. @@ -181,7 +181,7 @@ Set getContainers(DatanodeDetails datanodeDetails) * @param nodeReport */ void processNodeReport(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport); + NodeReportProto nodeReport); /** * Get list of SCMCommands in the Command Queue for a particular Datanode. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index dc43d164f701..d51961f7d471 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -481,7 +481,7 @@ public void removePipeline(Pipeline pipeline) { * use addDatanodeInContainerMap call. 
*/ public void addContainer(final UUID uuid, - final ContainerID containerId) + final ContainerID containerId) throws NodeNotFoundException { nodeStateMap.addContainer(uuid, containerId); } @@ -598,33 +598,33 @@ private void checkNodesHealth() { for (UUID id : nodes) { DatanodeInfo node = nodeStateMap.getNodeInfo(id); switch (state) { - case HEALTHY: - // Move the node to STALE if the last heartbeat time is less than - // configured stale-node interval. - updateNodeState(node, staleNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - break; - case STALE: - // Move the node to DEAD if the last heartbeat time is less than - // configured dead-node interval. - updateNodeState(node, deadNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - // Restore the node if we have received heartbeat before configured - // stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESTORE); - break; - case DEAD: - // Resurrect the node if we have received heartbeat before - // configured stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESURRECT); - break; - // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in - // heartbeat processing. - case DECOMMISSIONING: - case DECOMMISSIONED: - default: + case HEALTHY: + // Move the node to STALE if the last heartbeat time is less than + // configured stale-node interval. + updateNodeState(node, staleNodeCondition, state, + NodeLifeCycleEvent.TIMEOUT); + break; + case STALE: + // Move the node to DEAD if the last heartbeat time is less than + // configured dead-node interval. + updateNodeState(node, deadNodeCondition, state, + NodeLifeCycleEvent.TIMEOUT); + // Restore the node if we have received heartbeat before configured + // stale-node interval. + updateNodeState(node, healthyNodeCondition, state, + NodeLifeCycleEvent.RESTORE); + break; + case DEAD: + // Resurrect the node if we have received heartbeat before + // configured stale-node interval. + updateNodeState(node, healthyNodeCondition, state, + NodeLifeCycleEvent.RESURRECT); + break; + // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in + // heartbeat processing. + case DECOMMISSIONING: + case DECOMMISSIONED: + default: } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index b17d9f446403..1a0cec3b2176 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -520,10 +520,10 @@ public int getNumHealthyVolumes(List dnList) { for (DatanodeDetails dn : dnList) { try { volumeCountList.add(nodeStateManager.getNode(dn). 
- getHealthyVolumeCount()); + getHealthyVolumeCount()); } catch (NodeNotFoundException e) { LOG.warn("Cannot generate NodeStat, datanode {} not found.", - dn.getUuid()); + dn.getUuid()); } } Preconditions.checkArgument(!volumeCountList.isEmpty()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java index 02151ab9b337..baebef5ccf87 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java @@ -105,7 +105,7 @@ public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState) * @throws NodeNotFoundException if the node is not present */ public void updateNodeState(UUID nodeId, NodeState currentState, - NodeState newState)throws NodeNotFoundException { + NodeState newState)throws NodeNotFoundException { lock.writeLock().lock(); try { checkIfNodeExist(nodeId); @@ -234,7 +234,7 @@ public NodeState getNodeState(UUID uuid) throws NodeNotFoundException { * use addDatanodeInContainerMap call. */ public void addContainer(final UUID uuid, - final ContainerID containerId) + final ContainerID containerId) throws NodeNotFoundException { lock.writeLock().lock(); try { From 5053e96d44a7b053e7f3062ea2e0ea3d90998675 Mon Sep 17 00:00:00 2001 From: HuangTao Date: Sat, 8 Aug 2020 01:55:49 +0800 Subject: [PATCH 4/7] HDDS-4039. Fix checkstyle --- ...torageContainerDatanodeProtocolClientSideTranslatorPB.java | 1 - .../java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index cf624628c22e..4da8b2754559 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -18,7 +18,6 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos .ExtendedDatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 5d399e9abe7d..325c8ef963d6 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -275,7 +275,7 @@ public void setUp() throws Exception { HddsProtos.ExtendedDatanodeDetailsProto.newBuilder() .setDatanodeDetails(datanodeDetailsProto) .setVersion("0.6.0") - .setSetupTime(1596347628802l) + .setSetupTime(1596347628802L) .setBuildDate("2020-08-01T08:50Z") .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36") .build(); @@ -306,7 +306,7 @@ public void setUp() throws Exception { ExtendedDatanodeDetailsProto.newBuilder() 
 .setDatanodeDetails(datanodeDetailsProto)
 .setVersion("0.6.0")
- .setSetupTime(1596347636802l)
+ .setSetupTime(1596347636802L)
 .setBuildDate("2020-08-01T08:50Z")
 .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
 .build();

From 1488792fe072037ed73394bd1fb627eee9ab60b7 Mon Sep 17 00:00:00 2001
From: HuangTao
Date: Sat, 8 Aug 2020 03:20:07 +0800
Subject: [PATCH 5/7] HDDS-4039. Fix unittest and review

---
 hadoop-hdds/interface-client/src/main/proto/hdds.proto     | 5 +++++
 .../org/apache/hadoop/ozone/recon/api/TestEndpoints.java   | 6 +++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 0c9b26142558..7fcb84df8edc 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -43,6 +43,11 @@ message DatanodeDetailsProto {
 // network name, can be Ip address or host name, depends
 optional string networkName = 6;
 optional string networkLocation = 7; // Network topology location
+ // TODO(runitao): remove these in the next release; version and setupTime
+ // are deprecated and have moved into ExtendedDatanodeDetailsProto. They are
+ // kept here only for backward compatibility.
+ optional string version = 8;
+ optional int64 setupTime = 9;
 // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required
 optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode.
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 325c8ef963d6..b99f30cca948 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -299,12 +299,12 @@ public void setUp() throws Exception {
 DatanodeDetailsProto datanodeDetailsProto2 =
 DatanodeDetailsProto.newBuilder()
 .setHostName(host2)
- .setUuid(datanodeId)
- .setIpAddress(ip1)
+ .setUuid(datanodeId2)
+ .setIpAddress(ip2)
 .build();
 ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 =
 ExtendedDatanodeDetailsProto.newBuilder()
- .setDatanodeDetails(datanodeDetailsProto)
+ .setDatanodeDetails(datanodeDetailsProto2)
 .setVersion("0.6.0")
 .setSetupTime(1596347636802L)
 .setBuildDate("2020-08-01T08:50Z")
 .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
 .build();

From 9d53419d8073e0cb519a78ae27825d19c11cf3fe Mon Sep 17 00:00:00 2001
From: HuangTao
Date: Wed, 12 Aug 2020 14:35:28 +0800
Subject: [PATCH 6/7] HDDS-4039. Address the review

---
 hadoop-hdds/interface-client/src/main/proto/hdds.proto | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 7fcb84df8edc..0c9b26142558 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -43,11 +43,6 @@ message DatanodeDetailsProto {
 // network name, can be Ip address or host name, depends
 optional string networkName = 6;
 optional string networkLocation = 7; // Network topology location
- // TODO(runitao): remove these in the next release; version and setupTime
- // are deprecated and have moved into ExtendedDatanodeDetailsProto. They are
- // kept here only for backward compatibility.
- optional string version = 8;
- optional int64 setupTime = 9;
 // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required
 optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode.
 }

From 61df4ada8c285b26d18a4a072b956ff166879b66 Mon Sep 17 00:00:00 2001
From: HuangTao
Date: Mon, 14 Sep 2020 10:29:00 +0800
Subject: [PATCH 7/7] trigger new CI check
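
Note for reviewers: below is a minimal, illustrative sketch (not part of any
patch above) of how the pieces introduced by this series fit together at
registration time. The message shapes come from hdds.proto and
ScmServerDatanodeHeartbeatProtocol.proto as of PATCH 5; the class name
RegistrationSketch, the endPoint parameter, and all literal values are made up
for illustration, and the report objects stand in for what RegisterEndpointTask
already holds in scope.

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.util.Time;

/** Reviewer sketch (hypothetical class): one-shot registration payload. */
final class RegistrationSketch {

  static SCMRegisteredResponseProto registerOnce(
      StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint,
      NodeReportProto nodeReport,
      ContainerReportsProto containerReports,
      PipelineReportsProto pipelineReports) throws IOException {

    // The slim core message; this is what heartbeats and every other
    // HddsProtos user keep exchanging after HDDS-4039.
    HddsProtos.DatanodeDetailsProto datanodeDetails =
        HddsProtos.DatanodeDetailsProto.newBuilder()
            .setUuid(UUID.randomUUID().toString())
            .setHostName("host1.example.com")      // illustrative values only
            .setIpAddress("10.0.0.1")
            .build();

    // The extended wrapper adds version/setupTime/revision/buildDate and is
    // sent exactly once, as field 1 of SCMRegisterRequestProto.
    HddsProtos.ExtendedDatanodeDetailsProto extendedDetails =
        HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
            .setDatanodeDetails(datanodeDetails)   // required field
            .setVersion("0.6.0")
            .setSetupTime(Time.now())
            .setRevision("1346f493fa1690358add7bb9f3e5b52545993f36")
            .setBuildDate("2020-08-01T16:19Z")
            .build();

    // register() is the only RPC that carries the extra fields; SCM and Recon
    // unpack them via DatanodeDetails.getFromProtoBuf(extendedDetails).
    return endPoint.register(
        extendedDetails, nodeReport, containerReports, pipelineReports);
  }
}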