diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
index 672cb894e8e8..315821a90044 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
@@ -43,14 +43,6 @@ public class DatanodeRatisServerConfig {
   )
   private long requestTimeOut = Duration.ofSeconds(60).toMillis();

-  public long getRequestTimeOut() {
-    return requestTimeOut;
-  }
-
-  public void setRequestTimeOut(Duration duration) {
-    this.requestTimeOut = duration.toMillis();
-  }
-
   @Config(key = "watch.timeout",
       defaultValue = "30s",
       type = ConfigType.TIME,
@@ -63,14 +55,6 @@ public void setRequestTimeOut(Duration duration) {
   )
   private long watchTimeOut = Duration.ofSeconds(30).toMillis();

-  public long getWatchTimeOut() {
-    return watchTimeOut;
-  }
-
-  public void setWatchTimeOut(Duration duration) {
-    this.watchTimeOut = duration.toMillis();
-  }
-
   @Config(key = "notification.no-leader.timeout",
       defaultValue = "300s",
       type = ConfigType.TIME,
@@ -81,14 +65,6 @@ public void setWatchTimeOut(Duration duration) {
   )
   private long noLeaderTimeout = Duration.ofSeconds(300).toMillis();

-  public long getNoLeaderTimeout() {
-    return noLeaderTimeout;
-  }
-
-  public void setNoLeaderTimeout(Duration duration) {
-    this.noLeaderTimeout = duration.toMillis();
-  }
-
   @Config(key = "rpc.slowness.timeout",
       defaultValue = "300s",
       type = ConfigType.TIME,
@@ -99,14 +75,6 @@ public void setNoLeaderTimeout(Duration duration) {
   )
   private long followerSlownessTimeout = Duration.ofSeconds(300).toMillis();

-  public long getFollowerSlownessTimeout() {
-    return followerSlownessTimeout;
-  }
-
-  public void setFollowerSlownessTimeout(Duration duration) {
-    this.followerSlownessTimeout = duration.toMillis();
-  }
-
   @Config(key = "write.element-limit",
       defaultValue = "1024",
       type = ConfigType.INT,
@@ -116,14 +84,6 @@ public void setFollowerSlownessTimeout(Duration duration) {
   )
   private int leaderNumPendingRequests;

-  public int getLeaderNumPendingRequests() {
-    return leaderNumPendingRequests;
-  }
-
-  public void setLeaderNumPendingRequests(int leaderNumPendingRequests) {
-    this.leaderNumPendingRequests = leaderNumPendingRequests;
-  }
-
   @Config(key = "datastream.request.threads",
       defaultValue = "20",
       type = ConfigType.INT,
@@ -133,14 +93,6 @@ public void setLeaderNumPendingRequests(int leaderNumPendingRequests) {
   )
   private int streamRequestThreads;

-  public int getStreamRequestThreads() {
-    return streamRequestThreads;
-  }
-
-  public void setStreamRequestThreads(int streamRequestThreads) {
-    this.streamRequestThreads = streamRequestThreads;
-  }
-
   @Config(key = "datastream.client.pool.size",
       defaultValue = "10",
       type = ConfigType.INT,
@@ -150,14 +102,6 @@ public void setStreamRequestThreads(int streamRequestThreads) {
   )
   private int clientPoolSize;

-  public int getClientPoolSize() {
-    return clientPoolSize;
-  }
-
-  public void setClientPoolSize(int clientPoolSize) {
-    this.clientPoolSize = clientPoolSize;
-  }
-
   @Config(key = "delete.ratis.log.directory",
       defaultValue = "true",
       type = ConfigType.BOOLEAN,
@@ -167,14 +111,6 @@ public void setClientPoolSize(int clientPoolSize) {
   )
   private boolean shouldDeleteRatisLogDirectory;

-  public boolean shouldDeleteRatisLogDirectory() {
-    return shouldDeleteRatisLogDirectory;
-  }
-
-  public void setLeaderNumPendingRequests(boolean delete) {
-    this.shouldDeleteRatisLogDirectory = delete;
-  }
-
   @Config(key = "leaderelection.pre-vote",
       defaultValue = "true",
       type = ConfigType.BOOLEAN,
@@ -183,14 +119,6 @@ public void setLeaderNumPendingRequests(boolean delete) {
   )
   private boolean preVoteEnabled = true;

-  public boolean isPreVoteEnabled() {
-    return preVoteEnabled;
-  }
-
-  public void setPreVote(boolean preVote) {
-    this.preVoteEnabled = preVote;
-  }
-
   /** @see RaftServerConfigKeys.Log.Appender#WAIT_TIME_MIN_KEY */
   @Config(key = "log.appender.wait-time.min",
       defaultValue = "0us",
@@ -204,6 +132,78 @@ public void setPreVote(boolean preVote) {
   )
   private long logAppenderWaitTimeMin;

+  public long getRequestTimeOut() {
+    return requestTimeOut;
+  }
+
+  public void setRequestTimeOut(Duration duration) {
+    this.requestTimeOut = duration.toMillis();
+  }
+
+  public long getWatchTimeOut() {
+    return watchTimeOut;
+  }
+
+  public void setWatchTimeOut(Duration duration) {
+    this.watchTimeOut = duration.toMillis();
+  }
+
+  public long getNoLeaderTimeout() {
+    return noLeaderTimeout;
+  }
+
+  public void setNoLeaderTimeout(Duration duration) {
+    this.noLeaderTimeout = duration.toMillis();
+  }
+
+  public long getFollowerSlownessTimeout() {
+    return followerSlownessTimeout;
+  }
+
+  public void setFollowerSlownessTimeout(Duration duration) {
+    this.followerSlownessTimeout = duration.toMillis();
+  }
+
+  public int getLeaderNumPendingRequests() {
+    return leaderNumPendingRequests;
+  }
+
+  public void setLeaderNumPendingRequests(int leaderNumPendingRequests) {
+    this.leaderNumPendingRequests = leaderNumPendingRequests;
+  }
+
+  public int getStreamRequestThreads() {
+    return streamRequestThreads;
+  }
+
+  public void setStreamRequestThreads(int streamRequestThreads) {
+    this.streamRequestThreads = streamRequestThreads;
+  }
+
+  public int getClientPoolSize() {
+    return clientPoolSize;
+  }
+
+  public void setClientPoolSize(int clientPoolSize) {
+    this.clientPoolSize = clientPoolSize;
+  }
+
+  public boolean shouldDeleteRatisLogDirectory() {
+    return shouldDeleteRatisLogDirectory;
+  }
+
+  public void setLeaderNumPendingRequests(boolean delete) {
+    this.shouldDeleteRatisLogDirectory = delete;
+  }
+
+  public boolean isPreVoteEnabled() {
+    return preVoteEnabled;
+  }
+
+  public void setPreVote(boolean preVote) {
+    this.preVoteEnabled = preVote;
+  }
+
   public long getLogAppenderWaitTimeMin() {
     return logAppenderWaitTimeMin;
   }
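The hunks above move every getter and setter in DatanodeRatisServerConfig below the last @Config-annotated field, so the class reads fields first, accessors last. A minimal sketch of the resulting layout, with a hypothetical property name standing in for the real ones:

```java
import java.time.Duration;

// Sketch of the layout the refactor enforces: annotated fields first, accessors grouped at the bottom.
public class ExampleRatisConfig {
  // hypothetical field, mirroring the @Config(key = "...", type = ConfigType.TIME) fields above
  private long exampleTimeoutMillis = Duration.ofSeconds(60).toMillis();

  public long getExampleTimeout() {
    return exampleTimeoutMillis;
  }

  public void setExampleTimeout(Duration duration) {
    this.exampleTimeoutMillis = duration.toMillis();
  }
}
```

Note that the move preserves the oddly named setLeaderNumPendingRequests(boolean delete) overload verbatim; it actually sets shouldDeleteRatisLogDirectory, and renaming it is out of scope for a pure reordering patch.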
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
index c158b0c4556a..0d60620d87d0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
@@ -43,6 +43,8 @@
 @InterfaceStability.Evolving
 public interface SpaceUsageCheckFactory {

+  String CONFIG_PREFIX = "hdds.datanode.du.factory";
+
   /**
    * Creates configuration for the HDDS volume rooted at {@code dir}.
    *
@@ -110,8 +112,6 @@ static DUFactory defaultImplementation() {
     return new DUFactory();
   }

-  String CONFIG_PREFIX = "hdds.datanode.du.factory";
-
   /**
    * Configuration for {@link SpaceUsageCheckFactory}.
    */
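SpaceUsageCheckFactory's CONFIG_PREFIX moves from below defaultImplementation() to the top of the interface. Interface fields are implicitly public static final in Java, so the move changes only reading order, not semantics; a short sketch with hypothetical names:

```java
// Sketch: interface constants declared before any methods (hypothetical interface).
public interface ExampleFactory {
  String CONFIG_PREFIX = "example.du.factory"; // implicitly public static final

  ExampleFactory setConfiguration(String configPrefix);
}
```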
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
index d97acfbece93..38f45092dffc 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/MoveDataNodePair.java
@@ -39,10 +39,6 @@ public class MoveDataNodePair {
       MoveDataNodePair.class,
       DelegatedCodec.CopyType.SHALLOW);

-  public static Codec<MoveDataNodePair> getCodec() {
-    return CODEC;
-  }
-
   /**
    * source datanode of current move option.
    */
@@ -58,6 +54,10 @@ public MoveDataNodePair(DatanodeDetails src, DatanodeDetails tgt) {
     this.tgt = tgt;
   }

+  public static Codec<MoveDataNodePair> getCodec() {
+    return CODEC;
+  }
+
   public DatanodeDetails getTgt() {
     return tgt;
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenGenerator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenGenerator.java
index 45661b286bff..f6a6c6c52caa 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenGenerator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenGenerator.java
@@ -25,19 +25,6 @@
  * Generates container tokens.
  */
 public interface ContainerTokenGenerator {
-
-  /**
-   * Shortcut for generating encoded token for current user.
-   * @throws UncheckedIOException if user lookup or URL-encoding fails
-   */
-  String generateEncodedToken(ContainerID containerID);
-
-  /**
-   * Generate token for the container.
-   */
-  Token<ContainerTokenIdentifier> generateToken(String user,
-      ContainerID containerID);
-
   /**
    * No-op implementation for when container tokens are disabled.
    */
@@ -48,10 +35,19 @@ public String generateEncodedToken(ContainerID containerID) {
     }

     @Override
-    public Token<ContainerTokenIdentifier> generateToken(String user,
-        ContainerID containerID) {
+    public Token<ContainerTokenIdentifier> generateToken(String user, ContainerID containerID) {
       return new Token<>();
     }
   };

+  /**
+   * Shortcut for generating encoded token for current user.
+   * @throws UncheckedIOException if user lookup or URL-encoding fails
+   */
+  String generateEncodedToken(ContainerID containerID);
+
+  /**
+   * Generate token for the container.
+   */
+  Token<ContainerTokenIdentifier> generateToken(String user, ContainerID containerID);
 }
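Both files keep a private static CODEC plus a public static accessor; the accessor now sits with the other members after the constructor rather than beside the field. A simplified sketch of that accessor pattern (class and stand-in field are hypothetical, not the Ozone Codec API):

```java
// Sketch of the static-codec-accessor pattern; the DelegatedCodec wiring is elided.
public final class ExamplePair {
  private static final String CODEC = "stand-in for the DelegatedCodec instance";

  private final String src;

  private ExamplePair(String src) {
    this.src = src;
  }

  // accessor declared after the constructor, as in MoveDataNodePair above
  public static String getCodec() {
    return CODEC;
  }
}
```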
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java
index 2544c5fc699e..0bbd28b5fe19 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/CertInfo.java
@@ -40,10 +40,6 @@ public final class CertInfo implements Comparable<CertInfo>, Serializable {
       CertInfo::getProtobuf,
       CertInfo.class);

-  public static Codec<CertInfo> getCodec() {
-    return CODEC;
-  }
-
   static final Comparator<CertInfo> COMPARATOR =
       Comparator.comparingLong(CertInfo::getTimestamp);

@@ -56,6 +52,10 @@ private CertInfo(X509Certificate x509Certificate, long timestamp) {
     this.timestamp = timestamp;
   }

+  public static Codec<CertInfo> getCodec() {
+    return CODEC;
+  }
+
   public static CertInfo fromProtobuf(CertInfoProto info) throws IOException {
     return new CertInfo.Builder()
         .setX509Certificate(info.getX509Certificate())
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
index 3b76325226b6..06008dd4f102 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
@@ -1703,12 +1703,12 @@ private void initHttpHeaderMap() {
   public enum XFrameOption {
     DENY("DENY"), SAMEORIGIN("SAMEORIGIN"), ALLOWFROM("ALLOW-FROM");

+    private final String name;
+
     XFrameOption(String name) {
       this.name = name;
     }

-    private final String name;
-
     @Override
     public String toString() {
       return this.name;
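In HttpServer2.XFrameOption the name field now precedes the constructor that assigns it. The compiler accepts either order for enums, so this is purely a declaration-order cleanup; sketch:

```java
// Sketch: enum constants, then fields, then constructor, then methods (hypothetical enum).
public enum ExampleHeaderOption {
  DENY("DENY"), SAMEORIGIN("SAMEORIGIN");

  private final String headerValue;

  ExampleHeaderOption(String headerValue) {
    this.headerValue = headerValue;
  }

  @Override
  public String toString() {
    return headerValue;
  }
}
```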
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2Metrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2Metrics.java
index 4a44b2e0ec4f..d013f060be0c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2Metrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2Metrics.java
@@ -31,28 +31,8 @@
  */
 @InterfaceAudience.Private
 public final class HttpServer2Metrics implements MetricsSource {
-  enum HttpServer2MetricsInfo implements MetricsInfo {
-    SERVER_NAME("HttpServer2 Metrics."),
-    HttpServerThreadCount("Number of threads in the pool."),
-    HttpServerIdleThreadCount("Number of idle threads but not reserved."),
-    HttpServerMaxThreadCount("Maximum number of threads in the pool."),
-    HttpServerThreadQueueWaitingTaskCount(
-        "The number of jobs in the queue waiting for a thread");
-
-    private final String desc;
-
-    HttpServer2MetricsInfo(String desc) {
-      this.desc = desc;
-    }
-
-    @Override
-    public String description() {
-      return desc;
-    }
-  }
-
-  public static final String SOURCE_NAME =
-      HttpServer2Metrics.class.getSimpleName();
+  public static final String SOURCE_NAME = HttpServer2Metrics.class.getSimpleName();

   public static final String NAME = HttpServer2Metrics.class.getSimpleName();

@@ -91,4 +71,24 @@ public void unRegister() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     ms.unregisterSource(NAME);
   }
+
+  enum HttpServer2MetricsInfo implements MetricsInfo {
+    SERVER_NAME("HttpServer2 Metrics."),
+    HttpServerThreadCount("Number of threads in the pool."),
+    HttpServerIdleThreadCount("Number of idle threads but not reserved."),
+    HttpServerMaxThreadCount("Maximum number of threads in the pool."),
+    HttpServerThreadQueueWaitingTaskCount(
+        "The number of jobs in the queue waiting for a thread");
+
+    private final String desc;
+
+    HttpServer2MetricsInfo(String desc) {
+      this.desc = desc;
+    }
+
+    @Override
+    public String description() {
+      return desc;
+    }
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 0207fec264d0..07a8da2c90e8 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -112,16 +112,15 @@
  */
 public final class HddsServerUtil {

-  private HddsServerUtil() {
-  }
+  private static final Logger LOG = LoggerFactory.getLogger(HddsServerUtil.class);

   private static final int SHUTDOWN_HOOK_PRIORITY = 0;

   public static final String OZONE_RATIS_SNAPSHOT_COMPLETE_FLAG_NAME =
       "OZONE_RATIS_SNAPSHOT_COMPLETE";

-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsServerUtil.class);
+  private HddsServerUtil() {
+  }

   /**
    * Add protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/NettyMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/NettyMetrics.java
index 9af5b0b631e3..b09bada6d555 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/NettyMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/NettyMetrics.java
@@ -31,25 +31,8 @@
  * This class emits Netty metrics.
  */
 public final class NettyMetrics implements MetricsSource {
-  private enum MetricsInfos implements MetricsInfo {
-    USED_DIRECT_MEM("Used direct memory."),
-    MAX_DIRECT_MEM("Max direct memory.");
-
-    private final String desc;
-
-    MetricsInfos(String desc) {
-      this.desc = desc;
-    }
-
-    @Override
-    public String description() {
-      return desc;
-    }
-  }
-
-  public static final String SOURCE_NAME =
-      NettyMetrics.class.getSimpleName();
+  public static final String SOURCE_NAME = NettyMetrics.class.getSimpleName();

   public static NettyMetrics create() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
@@ -57,7 +40,6 @@ public static NettyMetrics create() {
     return ms.register(SOURCE_NAME, "Netty metrics", metrics);
   }

-
   @Override
   public void getMetrics(MetricsCollector collector, boolean all) {
     MetricsRecordBuilder recordBuilder = collector.addRecord(SOURCE_NAME)
@@ -71,4 +53,20 @@ public void unregister() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     ms.unregisterSource(SOURCE_NAME);
   }
+
+  private enum MetricsInfos implements MetricsInfo {
+    USED_DIRECT_MEM("Used direct memory."),
+    MAX_DIRECT_MEM("Max direct memory.");
+
+    private final String desc;
+
+    MetricsInfos(String desc) {
+      this.desc = desc;
+    }
+
+    @Override
+    public String description() {
+      return desc;
+    }
+  }
 }
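HttpServer2Metrics and NettyMetrics (and TableCacheMetrics below) all follow the same shape: the private MetricsInfo enum moves to the bottom of the class while the register/getMetrics/unregister lifecycle is untouched. A self-contained sketch of that shared shape, assuming the Hadoop metrics2 API is used exactly as in the hunks above (class and metric names are hypothetical):

```java
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

// Hypothetical metrics source illustrating the layout these classes converge on.
public final class ExampleMetrics implements MetricsSource {
  public static final String SOURCE_NAME = ExampleMetrics.class.getSimpleName();

  public static ExampleMetrics create() {
    MetricsSystem ms = DefaultMetricsSystem.instance();
    return ms.register(SOURCE_NAME, "Example metrics", new ExampleMetrics());
  }

  @Override
  public void getMetrics(MetricsCollector collector, boolean all) {
    collector.addRecord(SOURCE_NAME)
        .addGauge(MetricsInfos.EXAMPLE_VALUE, 42);
  }

  public void unregister() {
    DefaultMetricsSystem.instance().unregisterSource(SOURCE_NAME);
  }

  // the private enum now comes last, mirroring the moves above
  private enum MetricsInfos implements MetricsInfo {
    EXAMPLE_VALUE("An example gauge.");

    private final String desc;

    MetricsInfos(String desc) {
      this.desc = desc;
    }

    @Override
    public String description() {
      return desc;
    }
  }
}
```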
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TableCacheMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TableCacheMetrics.java
index 5dca3f222861..70142adea028 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TableCacheMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TableCacheMetrics.java
@@ -30,27 +30,8 @@
  * This class emits table level cache metrics.
  */
 public final class TableCacheMetrics implements MetricsSource {
-  private enum MetricsInfos implements MetricsInfo {
-    TableName("Table Name."),
-    Size("Size of the cache."),
-    HitCount("Number of time the lookup methods return a cached value."),
-    MissCount("Number of times the requested value is not in the cache."),
-    IterationCount("Number of times the table cache is iterated through.");
-    private final String desc;
-
-    MetricsInfos(String desc) {
-      this.desc = desc;
-    }
-
-    @Override
-    public String description() {
-      return desc;
-    }
-  }
-
-  public static final String SOURCE_NAME =
-      TableCacheMetrics.class.getSimpleName();
+  public static final String SOURCE_NAME = TableCacheMetrics.class.getSimpleName();

   private final TableCache<?, ?> cache;
   private final String tableName;
@@ -89,4 +70,23 @@ public void unregister() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     ms.unregisterSource(getSourceName());
   }
+
+  private enum MetricsInfos implements MetricsInfo {
+    TableName("Table Name."),
+    Size("Size of the cache."),
+    HitCount("Number of time the lookup methods return a cached value."),
+    MissCount("Number of times the requested value is not in the cache."),
+    IterationCount("Number of times the table cache is iterated through.");
+
+    private final String desc;
+
+    MetricsInfos(String desc) {
+      this.desc = desc;
+    }
+
+    @Override
+    public String description() {
+      return desc;
+    }
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
index 5be9288621c3..be0f81ac8b0c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java
@@ -45,6 +45,22 @@ public final class TransactionInfo implements Comparable<TransactionInfo> {
       TransactionInfo.class,
       DelegatedCodec.CopyType.SHALLOW);

+  public static final TransactionInfo DEFAULT_VALUE = valueOf(0, -1);
+
+  /** In non-Ratis clusters, term is -1. */
+  public static final long NON_RATIS_TERM = -1;
+
+  /**
+   * Use {@link SnapshotInfo} to store (term, index)
+   * which is the Ratis Log term-index in Ratis enabled cluster.
+   * In non-Ratis clusters, term is -1 and index is the unique transaction index
+   * in OzoneManagerProtocolServerSideTranslatorPB#transactionIndex.
+   */
+  private final SnapshotInfo snapshotInfo;
+
+  /** The string need to be persisted in OM DB. */
+  private final String transactionInfoString;
+
   public static Codec<TransactionInfo> getCodec() {
     return CODEC;
   }
@@ -78,25 +94,11 @@ public int compareTo(TransactionInfo info) {
     return this.getTermIndex().compareTo(info.getTermIndex());
   }

-  public static final TransactionInfo DEFAULT_VALUE = valueOf(0, -1);
-
-  /** In non-Ratis clusters, term is -1. */
-  public static final long NON_RATIS_TERM = -1;
   /** For non-Ratis case. */
   public static TermIndex getTermIndex(long transactionIndex) {
     return TermIndex.valueOf(NON_RATIS_TERM, transactionIndex);
   }

-  /**
-   * Use {@link SnapshotInfo} to store (term, index)
-   * which is the Ratis Log term-index in Ratis enabled cluster.
-   * In non-Ratis clusters, term is -1 and index is the unique transaction index
-   * in OzoneManagerProtocolServerSideTranslatorPB#transactionIndex.
-   */
-  private final SnapshotInfo snapshotInfo;
-  /** The string need to be persisted in OM DB. */
-  private final String transactionInfoString;
-
   private TransactionInfo(TermIndex termIndex) {
     this.transactionInfoString = termIndex.getTerm() + TRANSACTION_INFO_SPLIT_KEY + termIndex.getIndex();
     this.snapshotInfo = new SnapshotInfo() {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
index 82fa687ccca8..7d7d7ad77106 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java
@@ -31,6 +31,9 @@
  * This class is immutable.
  */
 public final class CodecRegistry {
+
+  private final CodecMap valueCodecs;
+
   /** To build {@link CodecRegistry}. */
   public static class Builder {
     private final Map<Class<?>, Codec<?>> codecs = new HashMap<>();
@@ -78,8 +81,6 @@ <T> Codec<T> get(List<Class<? extends T>> classes) {
     }
   }

-  private final CodecMap valueCodecs;
-
   private CodecRegistry(Map<Class<?>, Codec<?>> valueCodecs) {
     this.valueCodecs = new CodecMap(valueCodecs);
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
index 6dc8b994ba5d..89326bb4223a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java
@@ -32,23 +32,6 @@
  * @param <VALUE> they type of the value.
  */
 public class DBColumnFamilyDefinition<KEY, VALUE> {
-  public static Map<String, DBColumnFamilyDefinition<?, ?>> newUnmodifiableMap(
-      DBColumnFamilyDefinition<?, ?>... families) {
-    return newUnmodifiableMap(Collections.emptyMap(), families);
-  }
-
-  public static Map<String, DBColumnFamilyDefinition<?, ?>> newUnmodifiableMap(
-      Map<String, DBColumnFamilyDefinition<?, ?>> existing,
-      DBColumnFamilyDefinition<?, ?>... families) {
-    return CollectionUtils.newUnmodifiableMap(Arrays.asList(families),
-        DBColumnFamilyDefinition::getName, existing);
-  }
-
-  public static Map<String, List<DBColumnFamilyDefinition<?, ?>>>
-      newUnmodifiableMultiMap(DBColumnFamilyDefinition<?, ?>... families) {
-    return CollectionUtils.newUnmodifiableMultiMap(Arrays.asList(families),
-        DBColumnFamilyDefinition::getName);
-  }

   private final String tableName;

@@ -65,6 +48,25 @@ public DBColumnFamilyDefinition(String tableName, Codec<KEY> keyCodec, Codec<VALUE> valueCodec) {
     this.valueCodec = valueCodec;
   }

+  public static Map<String, DBColumnFamilyDefinition<?, ?>> newUnmodifiableMap(
+      DBColumnFamilyDefinition<?, ?>... families) {
+    return newUnmodifiableMap(Collections.emptyMap(), families);
+  }
+
+  public static Map<String, DBColumnFamilyDefinition<?, ?>> newUnmodifiableMap(
+      Map<String, DBColumnFamilyDefinition<?, ?>> existing,
+      DBColumnFamilyDefinition<?, ?>... families) {
+    return CollectionUtils.newUnmodifiableMap(Arrays.asList(families),
+        DBColumnFamilyDefinition::getName, existing);
+  }
+
+  public static Map<String, List<DBColumnFamilyDefinition<?, ?>>> newUnmodifiableMultiMap(
+      DBColumnFamilyDefinition<?, ?>... families
+  ) {
+    return CollectionUtils.newUnmodifiableMultiMap(Arrays.asList(families),
+        DBColumnFamilyDefinition::getName);
+  }
+
   public Table<KEY, VALUE> getTable(DBStore db) throws IOException {
     return db.getTable(tableName, getKeyType(), getValueType());
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index cbb94b768a8f..93e50a2d18c7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -43,6 +43,14 @@ public class RDBBatchOperation implements BatchOperation {

   static final Logger LOG = LoggerFactory.getLogger(RDBBatchOperation.class);

+  private static final AtomicInteger BATCH_COUNT = new AtomicInteger();
+
+  private final String name = "Batch-" + BATCH_COUNT.getAndIncrement();
+
+  private final ManagedWriteBatch writeBatch;
+
+  private final OpCache opCache = new OpCache();
+
   private enum Op { DELETE }

   private static void debug(Supplier<Object> message) {
@@ -122,6 +130,9 @@ public String toString() {

   /** Cache and deduplicate db ops (put/delete). */
   private class OpCache {
+    /** A (family name -> {@link FamilyCache}) map. */
+    private final Map<String, FamilyCache> name2cache = new HashMap<>();
+
     /** A cache for a {@link ColumnFamily}. */
     private class FamilyCache {
       private final ColumnFamily family;
@@ -262,9 +273,6 @@ public String toString() {
       }
     }

-    /** A (family name -> {@link FamilyCache}) map. */
-    private final Map<String, FamilyCache> name2cache = new HashMap<>();
-
     void put(ColumnFamily f, CodecBuffer key, CodecBuffer value) {
       name2cache.computeIfAbsent(f.getName(), k -> new FamilyCache(f))
           .put(key, value);
@@ -320,12 +328,6 @@ String getCommitString() {
     }
   }

-  private static final AtomicInteger BATCH_COUNT = new AtomicInteger();
-
-  private final String name = "Batch-" + BATCH_COUNT.getAndIncrement();
-  private final ManagedWriteBatch writeBatch;
-  private final OpCache opCache = new OpCache();
-
   public RDBBatchOperation() {
     writeBatch = new ManagedWriteBatch();
   }
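DBColumnFamilyDefinition's newUnmodifiableMap/newUnmodifiableMultiMap helpers and RDBBatchOperation's fields only change position. For orientation, a hypothetical call site for the relocated map helpers (the table names and codec choices are made up; StringCodec and LongCodec are existing Ozone codecs):

```java
// Hypothetical usage sketch of the relocated static factories.
DBColumnFamilyDefinition<String, Long> fooTable =
    new DBColumnFamilyDefinition<>("fooTable", StringCodec.get(), LongCodec.get());
DBColumnFamilyDefinition<String, Long> barTable =
    new DBColumnFamilyDefinition<>("barTable", StringCodec.get(), LongCodec.get());

// name -> definition, unmodifiable; keyed by DBColumnFamilyDefinition::getName
Map<String, DBColumnFamilyDefinition<?, ?>> byName =
    DBColumnFamilyDefinition.newUnmodifiableMap(fooTable, barTable);
```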
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java
index 5ff47c120ad9..d84ee57b5216 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java
@@ -28,11 +28,21 @@
  */
 public class RDBMetrics {

-  private static final String SOURCE_NAME =
-      RDBMetrics.class.getSimpleName();
+  private static final String SOURCE_NAME = RDBMetrics.class.getSimpleName();

   private static RDBMetrics instance;

+  private @Metric MutableCounterLong numDBKeyMayExistChecks;
+  private @Metric MutableCounterLong numDBKeyMayExistMisses;
+
+  private @Metric MutableCounterLong numDBKeyGets;
+  private @Metric MutableCounterLong numDBKeyGetIfExistChecks;
+  private @Metric MutableCounterLong numDBKeyGetIfExistMisses;
+  private @Metric MutableCounterLong numDBKeyGetIfExistGets;
+  // WAL Update data size and sequence count
+  private @Metric MutableCounterLong walUpdateDataSize;
+  private @Metric MutableCounterLong walUpdateSequenceCount;
+
   public RDBMetrics() {
   }

@@ -47,17 +57,6 @@ public static synchronized RDBMetrics create() {
     return instance;
   }

-  private @Metric MutableCounterLong numDBKeyMayExistChecks;
-  private @Metric MutableCounterLong numDBKeyMayExistMisses;
-
-  private @Metric MutableCounterLong numDBKeyGets;
-  private @Metric MutableCounterLong numDBKeyGetIfExistChecks;
-  private @Metric MutableCounterLong numDBKeyGetIfExistMisses;
-  private @Metric MutableCounterLong numDBKeyGetIfExistGets;
-  // WAL Update data size and sequence count
-  private @Metric MutableCounterLong walUpdateDataSize;
-  private @Metric MutableCounterLong walUpdateSequenceCount;
-
   public long getNumDBKeyGets() {
     return numDBKeyGets.value();
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
index 59c6661129ef..107b71ee5045 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreCodecBufferIterator.java
@@ -26,63 +26,7 @@
 /**
  * Implement {@link RDBStoreAbstractIterator} using {@link CodecBuffer}.
  */
-class RDBStoreCodecBufferIterator
-    extends RDBStoreAbstractIterator<CodecBuffer> {
-  static class Buffer {
-    private final CodecBuffer.Capacity initialCapacity;
-    private final PutToByteBuffer<RuntimeException> source;
-    private CodecBuffer buffer;
-
-    Buffer(CodecBuffer.Capacity initialCapacity,
-        PutToByteBuffer<RuntimeException> source) {
-      this.initialCapacity = initialCapacity;
-      this.source = source;
-    }
-
-    void release() {
-      if (buffer != null) {
-        buffer.release();
-      }
-    }
-
-    private void prepare() {
-      if (buffer == null) {
-        allocate();
-      } else {
-        buffer.clear();
-      }
-    }
-
-    private void allocate() {
-      if (buffer != null) {
-        buffer.release();
-      }
-      buffer = CodecBuffer.allocateDirect(-initialCapacity.get());
-    }
-
-    CodecBuffer getFromDb() {
-      for (prepare(); ; allocate()) {
-        final Integer required = buffer.putFromSource(source);
-        if (required == null) {
-          return null; // the source is unavailable
-        } else if (required == buffer.readableBytes()) {
-          return buffer; // buffer size is big enough
-        }
-        // buffer size too small, try increasing the capacity.
-        if (buffer.setCapacity(required)) {
-          buffer.clear();
-          // retry with the new capacity
-          final int retried = buffer.putFromSource(source);
-          Preconditions.assertSame(required.intValue(), retried, "required");
-          return buffer;
-        }
-
-        // failed to increase the capacity
-        // increase initial capacity and reallocate it
-        initialCapacity.increase(required);
-      }
-    }
-  }
+class RDBStoreCodecBufferIterator extends RDBStoreAbstractIterator<CodecBuffer> {

   private final Buffer keyBuffer;
   private final Buffer valueBuffer;
@@ -152,4 +96,60 @@ public void close() {
       valueBuffer.release();
     }
   }
+
+  static class Buffer {
+    private final CodecBuffer.Capacity initialCapacity;
+    private final PutToByteBuffer<RuntimeException> source;
+    private CodecBuffer buffer;
+
+    Buffer(CodecBuffer.Capacity initialCapacity,
+        PutToByteBuffer<RuntimeException> source) {
+      this.initialCapacity = initialCapacity;
+      this.source = source;
+    }
+
+    void release() {
+      if (buffer != null) {
+        buffer.release();
+      }
+    }
+
+    private void prepare() {
+      if (buffer == null) {
+        allocate();
+      } else {
+        buffer.clear();
+      }
+    }
+
+    private void allocate() {
+      if (buffer != null) {
+        buffer.release();
+      }
+      buffer = CodecBuffer.allocateDirect(-initialCapacity.get());
+    }
+
+    CodecBuffer getFromDb() {
+      for (prepare(); ; allocate()) {
+        final Integer required = buffer.putFromSource(source);
+        if (required == null) {
+          return null; // the source is unavailable
+        } else if (required == buffer.readableBytes()) {
+          return buffer; // buffer size is big enough
+        }
+        // buffer size too small, try increasing the capacity.
+        if (buffer.setCapacity(required)) {
+          buffer.clear();
+          // retry with the new capacity
+          final int retried = buffer.putFromSource(source);
+          Preconditions.assertSame(required.intValue(), retried, "required");
+          return buffer;
+        }
+
+        // failed to increase the capacity
+        // increase initial capacity and reallocate it
+        initialCapacity.increase(required);
+      }
+    }
+  }
 }
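The relocated Buffer class implements a grow-and-retry read: putFromSource reports how many bytes the value needs, and if the current buffer is too small the capacity is raised (or the initial capacity bumped for the next allocation) and the read retried. The same control flow, reduced to plain NIO with hypothetical types:

```java
import java.nio.ByteBuffer;
import java.util.function.Function;

// Standalone sketch of the grow-and-retry loop in Buffer#getFromDb (simplified).
final class GrowingRead {
  private GrowingRead() { }

  /** source fills the buffer and returns the required size, or null if the value is absent. */
  static ByteBuffer read(Function<ByteBuffer, Integer> source, int initialCapacity) {
    ByteBuffer buffer = ByteBuffer.allocateDirect(initialCapacity);
    while (true) {
      Integer required = source.apply(buffer);
      if (required == null) {
        return null;                 // the source is unavailable
      }
      if (required <= buffer.capacity()) {
        return buffer;               // the value fit on this attempt
      }
      buffer = ByteBuffer.allocateDirect(required);  // too small: grow and retry
    }
  }
}
```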
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java
index 7911314b114c..c7250617b6f9 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RawKeyValue.java
@@ -26,6 +26,10 @@
  * @param <RAW> The raw type.
  */
 public abstract class RawKeyValue<RAW> implements KeyValue<RAW, RAW> {
+
+  private final RAW key;
+  private final RAW value;
+
   /**
    * Create a KeyValue pair.
    *
@@ -58,9 +62,6 @@ public byte[] getValue() {
     }
   }

-  private final RAW key;
-  private final RAW value;
-
   private RawKeyValue(RAW key, RAW value) {
     this.key = key;
     this.value = value;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 4ee7dcef6516..cf0c84f375e3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -76,11 +76,28 @@ public final class RocksDatabase implements Closeable {
   static final Logger LOG = LoggerFactory.getLogger(RocksDatabase.class);

   public static final String ESTIMATE_NUM_KEYS = "rocksdb.estimate-num-keys";
+
   static {
     ManagedRocksObjectUtils.loadRocksDBLibrary();
   }
-  private static final ManagedReadOptions DEFAULT_READ_OPTION =
-      new ManagedReadOptions();
+
+  private static final ManagedReadOptions DEFAULT_READ_OPTION = new ManagedReadOptions();
+
+  private final String name;
+  private final Throwable creationStackTrace = new Throwable("Object creation stack trace");
+
+  private final ManagedRocksDB db;
+  private final ManagedDBOptions dbOptions;
+  private final ManagedWriteOptions writeOptions;
+  private final List<ColumnFamilyDescriptor> descriptors;
+  /** column family names -> {@link ColumnFamily}. */
+  private final Map<String, ColumnFamily> columnFamilies;
+  /** {@link ColumnFamilyHandle#getID()} -> column family names. */
+  private final Supplier<Map<Integer, String>> columnFamilyNames;
+
+  private final AtomicBoolean isClosed = new AtomicBoolean();
+  /** Count the number of operations running concurrently. */
+  private final AtomicLong counter = new AtomicLong();

   static String bytes2String(byte[] bytes) {
     return StringCodec.get().fromPersistedFormat(bytes);
@@ -331,22 +348,6 @@ public String toString() {
     }
   }

-  private final String name;
-  private final Throwable creationStackTrace = new Throwable("Object creation stack trace");
-
-  private final ManagedRocksDB db;
-  private final ManagedDBOptions dbOptions;
-  private final ManagedWriteOptions writeOptions;
-  private final List<ColumnFamilyDescriptor> descriptors;
-  /** column family names -> {@link ColumnFamily}. */
-  private final Map<String, ColumnFamily> columnFamilies;
-  /** {@link ColumnFamilyHandle#getID()} -> column family names. */
-  private final Supplier<Map<Integer, String>> columnFamilyNames;
-
-  private final AtomicBoolean isClosed = new AtomicBoolean();
-  /** Count the number of operations running concurrently. */
-  private final AtomicLong counter = new AtomicLong();
-
   private RocksDatabase(File dbFile, ManagedRocksDB db, ManagedDBOptions dbOptions,
       ManagedWriteOptions writeOptions, List<ColumnFamilyDescriptor> descriptors,
       List<ColumnFamilyHandle> handles) throws RocksDBException {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
index a5fc4ec61450..46278f8d468a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
@@ -27,10 +27,6 @@
  * Class that maintains Table Configuration.
  */
 public class TableConfig implements AutoCloseable {
-  static TableConfig newTableConfig(String name) {
-    return new TableConfig(name,
-        DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE.getColumnFamilyOptions());
-  }

   private final String name;
   private final ManagedColumnFamilyOptions columnFamilyOptions;
@@ -50,6 +46,11 @@ public TableConfig(String name,
     this.columnFamilyOptions = columnFamilyOptions;
   }

+  static TableConfig newTableConfig(String name) {
+    return new TableConfig(name,
+        DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE.getColumnFamilyOptions());
+  }
+
   /**
    * Returns the Name for this Table.
    * @return - Name String
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
index 29d174ee2a1f..c32a37c787e1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java
@@ -25,6 +25,16 @@
  */
 public final class CacheValue<VALUE> {

+  private final VALUE value;
+  // This value is used for evict entries from cache.
+  // This value is set with ratis transaction context log entry index.
+  private final long epoch;
+
+  private CacheValue(long epoch, VALUE value) {
+    this.value = value;
+    this.epoch = epoch;
+  }
+
   /** @return a {@link CacheValue} with a non-null value. */
   public static <V> CacheValue<V> get(long epoch, V value) {
     Objects.requireNonNull(value, "value == null");
@@ -36,16 +46,6 @@ public static <V> CacheValue<V> get(long epoch) {
     return new CacheValue<>(epoch, null);
   }

-  private final VALUE value;
-  // This value is used for evict entries from cache.
-  // This value is set with ratis transaction context log entry index.
-  private final long epoch;
-
-  private CacheValue(long epoch, VALUE value) {
-    this.value = value;
-    this.epoch = epoch;
-  }
-
   public VALUE getCacheValue() {
     return value;
   }
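CacheValue keeps its private constructor and static get factories; only their position changes, with fields and constructor now at the top. Sketch of the same construction pattern on a hypothetical class:

```java
import java.util.Objects;

// Hypothetical class showing the private-constructor-plus-static-factory layout.
public final class ExampleValue<V> {
  private final V value;
  private final long epoch; // eviction marker, like CacheValue's ratis log index

  private ExampleValue(long epoch, V value) {
    this.value = value;
    this.epoch = epoch;
  }

  /** @return a value-carrying entry; value must be non-null. */
  public static <V> ExampleValue<V> of(long epoch, V value) {
    return new ExampleValue<>(epoch, Objects.requireNonNull(value, "value == null"));
  }

  /** @return an entry marking a deleted/absent value. */
  public static <V> ExampleValue<V> empty(long epoch) {
    return new ExampleValue<>(epoch, null);
  }
}
```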
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java
index 7ffa37956854..5e9921e507ce 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java
@@ -58,6 +58,35 @@ public class GrpcMetrics implements MetricsSource {
   private final boolean grpcQuantileEnable;
   private String requestType;

+  @Metric("Number of sent bytes")
+  private MutableCounterLong sentBytes;
+
+  @Metric("Number of received bytes")
+  private MutableCounterLong receivedBytes;
+
+  @Metric("Number of unknown messages sent")
+  private MutableCounterLong unknownMessagesSent;
+
+  @Metric("Number of unknown messages received")
+  private MutableCounterLong unknownMessagesReceived;
+
+  @Metric("Queue time")
+  private MutableRate grpcQueueTime;
+
+  // There should be no getter method to avoid
+  // exposing internal representation. FindBugs error raised.
+  private MutableQuantiles[] grpcQueueTimeMillisQuantiles;
+
+  @Metric("Processing time")
+  private MutableRate grpcProcessingTime;
+
+  // There should be no getter method to avoid
+  // exposing internal representation. FindBugs error raised.
+  private MutableQuantiles[] grpcProcessingTimeMillisQuantiles;
+
+  @Metric("Number of active clients connected")
+  private MutableCounterLong numOpenClientConnections;
+
   public GrpcMetrics(Configuration conf) {
     this.registry = new MetricsRegistry("grpc");
     this.requestType = "NoRequest";
@@ -111,35 +140,6 @@ public synchronized void getMetrics(MetricsCollector collector, boolean all) {
     recordBuilder.tag(LATEST_REQUEST_TYPE, requestType);
   }

-  @Metric("Number of sent bytes")
-  private MutableCounterLong sentBytes;
-
-  @Metric("Number of received bytes")
-  private MutableCounterLong receivedBytes;
-
-  @Metric("Number of unknown messages sent")
-  private MutableCounterLong unknownMessagesSent;
-
-  @Metric("Number of unknown messages received")
-  private MutableCounterLong unknownMessagesReceived;
-
-  @Metric("Queue time")
-  private MutableRate grpcQueueTime;
-
-  // There should be no getter method to avoid
-  // exposing internal representation. FindBugs error raised.
-  private MutableQuantiles[] grpcQueueTimeMillisQuantiles;
-
-  @Metric("Processing time")
-  private MutableRate grpcProcessingTime;
-
-  // There should be no getter method to avoid
-  // exposing internal representation. FindBugs error raised.
-  private MutableQuantiles[] grpcProcessingTimeMillisQuantiles;
-
-  @Metric("Number of active clients connected")
-  private MutableCounterLong numOpenClientConnections;
-
   public void incrSentBytes(long byteCount) {
     sentBytes.incr(byteCount);
   }
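Taken together, the hunks normalize member order across these classes: constants and fields first, then constructors, then static factories and accessors, then instance methods, with private nested types last. This matches a checkstyle DeclarationOrder-style convention; a schematic (illustrative only, not from the patch):

```java
// Schematic of the member ordering the patch converges on.
public final class OrderedByConvention {
  public static final String A_CONSTANT = "first"; // static constants

  private final int aField = 1;                    // instance fields

  private OrderedByConvention() {                  // constructors
  }

  public static OrderedByConvention create() {     // static factories / accessors
    return new OrderedByConvention();
  }

  public int value() {                             // instance methods
    return aField;
  }

  private enum NestedLast { INSTANCE }             // nested types last
}
```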